Merge pull request #132 from emer/goal
Update to Cogent Lab from the leabra-compatible branch
rcoreilly authored Dec 23, 2024
2 parents 9706a37 + 38ee131 commit 807532e
Showing 131 changed files with 1,908 additions and 7,699 deletions.
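Nearly every hunk below is the same mechanical migration: the tensor packages move from `cogentcore.org/core/...` to `cogentcore.org/lab/...`, and the tensor API drops `[]int` index slices and dimension-name arguments in favor of variadic ints. As a quick orientation, here is a minimal sketch of the new calls, using only signatures that appear in the hunks below (the shapes and values are made up for illustration):

```go
package main

import (
	"fmt"

	"cogentcore.org/lab/tensor" // was cogentcore.org/core/tensor
)

func main() {
	w := tensor.NewFloat32(3, 4)    // was: tensor.NewFloat32([]int{3, 4})
	w.SetShapeSizes(2, 5)           // was: w.SetShape([]int{2, 5}, "Outputs", "Inputs")
	w.SetAdd(0.5, 1, 3)             // add to element (1,3); was: w.AddScalar([]int{1, 3}, 0.5)
	v := w.Value(1, 3)              // was: w.Value([]int{1, 3})
	oo := w.Shape().IndexTo1D(1, 3) // flat offset; was: w.Shape().Offset([]int{1, 3})
	fmt.Println(v, oo, w.Values[oo])
}
```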
2 changes: 1 addition & 1 deletion README.md
@@ -111,7 +111,7 @@ Here are the other packages from [Cogent Core](https://github.com/cogentcore/cor
 
 * [envs](https://github.com/emer/envs) has misc standalone environments that can be good starting points, including managing files, visual images, etc.
 
-* [ttail](https://github.com/cogentcore/core/tree/main/tensor/cmd/ttail) is a `tail` program for interactively viewing tabular (csv, tsv, etc) log files in a terminal CLI environment! `go install cogentcore.org/core/tensor/cmd/ttail@latest` from anywhere to install.
+* [ttail](https://github.com/cogentcore/core/tree/main/tensor/cmd/ttail) is a `tail` program for interactively viewing tabular (csv, tsv, etc) log files in a terminal CLI environment! `go install cogentcore.org/lab/tensor/cmd/ttail@latest` from anywhere to install.
 
 * [eTorch](https://github.com/emer/etorch) is the emergent interface to PyTorch models, providing emergent GUI NetView etc for these models.
40 changes: 20 additions & 20 deletions actrf/actrf.go
@@ -7,8 +7,10 @@ package actrf
 //go:generate core generate -add-types
 
 import (
-	"cogentcore.org/core/tensor"
-	"cogentcore.org/core/tensor/stats/norm"
+	"slices"
+
+	"cogentcore.org/lab/stats/stats"
+	"cogentcore.org/lab/tensor"
 )
 
 // RF is used for computing an activation-based receptive field.
@@ -59,17 +61,15 @@ func (af *RF) InitShape(act, src tensor.Tensor) []int {
 	aNy, aNx, _, _ := tensor.Projection2DShape(act.Shape(), false)
 	sNy, sNx, _, _ := tensor.Projection2DShape(src.Shape(), false)
 	oshp := []int{aNy, aNx, sNy, sNx}
-	if tensor.EqualInts(af.RF.Shp.Sizes, oshp) {
+	if slices.Equal(af.RF.Shape().Sizes, oshp) {
 		return oshp
 	}
-	snm := []string{"ActY", "ActX", "SrcY", "SrcX"}
 	sshp := []int{sNy, sNx}
-	ssnm := []string{"SrcY", "SrcX"}
-	af.RF.SetShape(oshp, snm...)
-	af.NormRF.SetShape(oshp, snm...)
-	af.SumProd.SetShape(oshp, snm...)
-	af.NormSrc.SetShape(sshp, ssnm...)
-	af.SumSrc.SetShape(sshp, ssnm...)
+	af.RF.SetShapeSizes(oshp...)
+	af.NormRF.SetShapeSizes(oshp...)
+	af.SumProd.SetShapeSizes(oshp...)
+	af.NormSrc.SetShapeSizes(sshp...)
+	af.SumSrc.SetShapeSizes(sshp...)
 
 	af.ConfigView(&af.RF)
 	af.ConfigView(&af.NormRF)
@@ -81,10 +81,11 @@ func (af *RF) InitShape(act, src tensor.Tensor) []int {
 
 // ConfigView configures the view params on the tensor
 func (af *RF) ConfigView(tsr *tensor.Float32) {
-	tsr.SetMetaData("colormap", "Viridis")
-	tsr.SetMetaData("grid-fill", "1") // remove extra lines
-	tsr.SetMetaData("fix-min", "true")
-	tsr.SetMetaData("min", "0")
+	// todo:meta
+	// tsr.SetMetaData("colormap", "Viridis")
+	// tsr.SetMetaData("grid-fill", "1") // remove extra lines
+	// tsr.SetMetaData("fix-min", "true")
+	// tsr.SetMetaData("min", "0")
 }
 
 // Reset reinitializes the Sum accumulators -- must have called Init first
@@ -106,11 +107,11 @@ func (af *RF) Add(act, src tensor.Tensor, thr float32) {
 			if tv < thr {
 				continue
 			}
-			af.SumSrc.AddScalar([]int{sy, sx}, float64(tv))
+			af.SumSrc.SetAdd(tv, sy, sx)
 			for ay := 0; ay < aNy; ay++ {
 				for ax := 0; ax < aNx; ax++ {
 					av := float32(tensor.Projection2DValue(act, false, ay, ax))
-					af.SumProd.AddScalar([]int{ay, ax, sy, sx}, float64(av*tv))
+					af.SumProd.SetAdd(av*tv, ay, ax, sy, sx)
 				}
 			}
 		}
@@ -126,7 +127,7 @@ func (af *RF) Avg() {
 	var maxSrc float32
 	for sy := 0; sy < sNy; sy++ {
 		for sx := 0; sx < sNx; sx++ {
-			src := af.SumSrc.Value([]int{sy, sx})
+			src := af.SumSrc.Value(sy, sx)
 			if src == 0 {
 				continue
 			}
@@ -135,7 +136,7 @@ func (af *RF) Avg() {
 			}
 			for ay := 0; ay < aNy; ay++ {
 				for ax := 0; ax < aNx; ax++ {
-					oo := af.SumProd.Shape().Offset([]int{ay, ax, sy, sx})
+					oo := af.SumProd.Shape().IndexTo1D(ay, ax, sy, sx)
 					af.RF.Values[oo] = af.SumProd.Values[oo] / src
 				}
 			}
@@ -151,8 +152,7 @@ func (af *RF) Avg() {
 
 // Norm computes unit norm of RF values -- must be called after Avg
 func (af *RF) Norm() {
-	af.NormRF.CopyFrom(&af.RF)
-	norm.TensorUnit(&af.NormRF, 2) // 2 = norm within outer 2 dims = norm each src within
+	stats.UnitNormOut(&af.RF, &af.NormRF)
 }
 
 // AvgNorm computes RF as SumProd / SumTarg and then does Norm.
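The `Norm` change at the end of this file replaces a copy-plus-`norm.TensorUnit` pair with a single `stats.UnitNormOut` call from the new `cogentcore.org/lab/stats/stats` package, which writes the normalized result to a separate output tensor and leaves the input untouched. A standalone sketch of that call, with the signature taken from the hunk above and the exact normalization semantics assumed to match the old unit-norm behavior:

```go
package main

import (
	"fmt"

	"cogentcore.org/lab/stats/stats"
	"cogentcore.org/lab/tensor"
)

func main() {
	rf := tensor.NewFloat32(2, 2)
	for i := range rf.Values {
		rf.Values[i] = float32(i + 1) // 1, 2, 3, 4
	}
	nrm := tensor.NewFloat32(2, 2)
	// Unit-normalize rf into nrm (rf is unchanged), as in actrf.RF.Norm above.
	stats.UnitNormOut(rf, nrm)
	fmt.Println(nrm.Values)
}
```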
2 changes: 1 addition & 1 deletion actrf/actrfs.go
@@ -8,7 +8,7 @@ import (
 	"fmt"
 
 	"cogentcore.org/core/base/errors"
-	"cogentcore.org/core/tensor"
+	"cogentcore.org/lab/tensor"
 )
 
 // RFs manages multiple named RF's -- each one must be initialized first
4 changes: 2 additions & 2 deletions actrf/mpi.go
@@ -5,8 +5,8 @@
 package actrf
 
 import (
-	"cogentcore.org/core/base/mpi"
-	"cogentcore.org/core/tensor/tensormpi"
+	"cogentcore.org/lab/base/mpi"
+	"cogentcore.org/lab/tensor/tensormpi"
 )
 
 // MPISum aggregates RF Sum data across all processors in given mpi communicator.
7 changes: 3 additions & 4 deletions actrf/running.go
@@ -4,7 +4,7 @@
 
 package actrf
 
-import "cogentcore.org/core/tensor"
+import "cogentcore.org/lab/tensor"
 
 // RunningAvg computes a running-average activation-based receptive field
 // for activities act relative to source activations src (the thing we're projecting rf onto)
@@ -17,15 +17,14 @@ func RunningAvg(out *tensor.Float32, act, src tensor.Tensor, tau float32) {
 	aNy, aNx, _, _ := tensor.Projection2DShape(act.Shape(), false)
 	tNy, tNx, _, _ := tensor.Projection2DShape(src.Shape(), false)
 	oshp := []int{aNy, aNx, tNy, tNx}
-	out.SetShape(oshp, "ActY", "ActX", "SrcY", "SrcX")
+	out.SetShapeSizes(oshp...)
 	for ay := 0; ay < aNy; ay++ {
 		for ax := 0; ax < aNx; ax++ {
 			av := float32(tensor.Projection2DValue(act, false, ay, ax))
 			for ty := 0; ty < tNy; ty++ {
 				for tx := 0; tx < tNx; tx++ {
 					tv := float32(tensor.Projection2DValue(src, false, ty, tx))
-					oi := []int{ay, ax, ty, tx}
-					oo := out.Shape().Offset(oi)
+					oo := out.Shape().IndexTo1D(ay, ax, ty, tx)
 					ov := out.Values[oo]
 					nv := cdt*ov + dt*tv*av
 					out.Values[oo] = nv
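The inner-loop update kept as context here, `nv := cdt*ov + dt*tv*av`, is a standard exponential running average with time constant `tau`; assuming `dt = 1/tau` and `cdt = 1 - dt` (these are defined just above the visible hunk), each step blends the old value with the new act-times-src product. A tiny worked example under that assumption:

```go
package main

import "fmt"

func main() {
	// One RunningAvg step, assuming dt = 1/tau and cdt = 1 - dt.
	tau := float32(10)
	dt := 1 / tau
	cdt := 1 - dt
	ov := float32(0.5) // previous running-average value
	tv := float32(1.0) // source activation
	av := float32(0.8) // unit activation
	nv := cdt*ov + dt*tv*av
	fmt.Println(nv) // 0.9*0.5 + 0.1*1.0*0.8 = 0.53
}
```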
2 changes: 1 addition & 1 deletion chem/stater.go
@@ -4,7 +4,7 @@
 
 package chem
 
-import "cogentcore.org/core/tensor/table"
+import "cogentcore.org/lab/table"
 
 // The Stater interface defines the functions implemented for State
 // structures containing chem state variables.
35 changes: 1 addition & 34 deletions confusion/confusion.go
@@ -6,14 +6,7 @@ package confusion
 
 //go:generate core generate -add-types
 
-import (
-	"fmt"
-	"math"
-
-	"cogentcore.org/core/core"
-	"cogentcore.org/core/tensor"
-	"cogentcore.org/core/tensor/stats/simat"
-)
+/*
 
 // Matrix computes the confusion matrix, with rows representing
 // the ground truth correct class, and columns representing the
@@ -208,30 +201,4 @@ func (cm *Matrix) SaveCSV(fname core.Filename) {
 func (cm *Matrix) OpenCSV(fname core.Filename) {
 	tensor.OpenCSV(&cm.Prob, fname, ',')
 }
-
-/*
-var MatrixProps = tree.Props{
-	"ToolBar": tree.PropSlice{
-		{"SaveCSV", tree.Props{
-			"label": "Save CSV...",
-			"icon": "file-save",
-			"desc": "Save CSV-formatted confusion probabilities (Probs)",
-			"Args": tree.PropSlice{
-				{"CSV File Name", tree.Props{
-					"ext": ".csv",
-				}},
-			},
-		}},
-		{"OpenCSV", tree.Props{
-			"label": "Open CSV...",
-			"icon": "file-open",
-			"desc": "Open CSV-formatted confusion probabilities (Probs)",
-			"Args": tree.PropSlice{
-				{"Weights File Name", tree.Props{
-					"ext": ".csv",
-				}},
-			},
-		}},
-	},
-}
 */
9 changes: 0 additions & 9 deletions confusion/typegen.go

This file was deleted.

12 changes: 6 additions & 6 deletions decoder/linear.go
@@ -9,9 +9,9 @@ package decoder
 import (
 	"fmt"
 
-	"cogentcore.org/core/base/mpi"
 	"cogentcore.org/core/math32"
-	"cogentcore.org/core/tensor"
+	"cogentcore.org/lab/base/mpi"
+	"cogentcore.org/lab/tensor"
 )
 
 type ActivationFunc func(float32) float32
@@ -61,7 +61,7 @@ type Linear struct {
 // Layer is the subset of emer.Layer that is used by this code
 type Layer interface {
 	Name() string
-	UnitValuesTensor(tsr tensor.Tensor, varNm string, di int) error
+	UnitValuesTensor(tsr tensor.Values, varNm string, di int) error
 	Shape() *tensor.Shape
 }
 
@@ -111,7 +111,7 @@ func (dec *Linear) Init(nOutputs, nInputs int, poolIndex int, activationFn Activ
 	dec.NOutputs = nOutputs
 	dec.Units = make([]LinearUnit, dec.NOutputs)
 	dec.Inputs = make([]float32, dec.NInputs)
-	dec.Weights.SetShape([]int{dec.NOutputs, dec.NInputs}, "Outputs", "Inputs")
+	dec.Weights.SetShapeSizes(dec.NOutputs, dec.NInputs)
 	for i := range dec.Weights.Values {
 		dec.Weights.Values[i] = 0.1
 	}
@@ -207,7 +207,7 @@ func (dec *Linear) Input(varNm string, di int) {
 		shape := ly.Shape()
 		y := dec.PoolIndex / shape.DimSize(1)
 		x := dec.PoolIndex % shape.DimSize(1)
-		tsr = tsr.SubSpace([]int{y, x}).(*tensor.Float32)
+		tsr = tsr.SubSpace(y, x).(*tensor.Float32)
 	}
 	for j, v := range tsr.Values {
 		dec.Inputs[off+j] = v
@@ -259,7 +259,7 @@ func (dec *Linear) Back() float32 {
 // Returns SSE (sum squared error) of difference between targets and outputs.
 func (dec *Linear) BackMPI() float32 {
 	if dec.MPIDWts.Len() != dec.Weights.Len() {
-		dec.MPIDWts.CopyShapeFrom(&dec.Weights)
+		tensor.SetShapeFrom(&dec.MPIDWts, &dec.Weights)
 	}
 	var sse float32
 	for ui := range dec.Units {
32 changes: 16 additions & 16 deletions decoder/linear_test.go
@@ -8,25 +8,25 @@ import (
 	"fmt"
 	"testing"
 
-	"cogentcore.org/core/tensor"
+	"cogentcore.org/lab/tensor"
 	"github.com/stretchr/testify/assert"
 )
 
 // TestLayer implements a Layer
 type TestLayer struct {
-	tensors map[string]tensor.Tensor
+	tensors map[string]tensor.Values
 }
 
 func (tl *TestLayer) Name() string {
 	return "TestLayer"
 }
 
-func (tl *TestLayer) UnitValuesTensor(tsr tensor.Tensor, varNm string, di int) error {
+func (tl *TestLayer) UnitValuesTensor(tsr tensor.Values, varNm string, di int) error {
 	src, ok := tl.tensors[varNm]
 	if !ok {
 		return fmt.Errorf("bad key: %s", varNm)
 	}
-	tsr.CopyShapeFrom(src)
+	tensor.SetShapeFrom(tsr, src)
 	tsr.CopyFrom(src)
 	return nil
 }
@@ -94,58 +94,58 @@ func TestLinearLogistic(t *testing.T) {
 
 func TestInputPool1D(t *testing.T) {
 	dec := Linear{}
-	shape := tensor.NewShape([]int{1, 5, 6, 6})
+	shape := tensor.NewShape(1, 5, 6, 6)
 	vals := make([]float32, shape.Len())
 	for i := range vals {
 		vals[i] = float32(i)
 	}
-	tsr := tensor.NewFloat32(shape.Sizes)
+	tsr := tensor.NewFloat32(shape.Sizes...)
 	tsr.SetNumRows(1)
 	for i := range tsr.Values {
 		tsr.Values[i] = vals[i]
 	}
-	layer := TestLayer{tensors: map[string]tensor.Tensor{"var0": tsr}}
+	layer := TestLayer{tensors: map[string]tensor.Values{"var0": tsr}}
 	dec.InitPool(2, &layer, 0, IdentityFunc)
 	dec.Input("var0", 0)
-	expected := tsr.SubSpace([]int{0, 0}).(*tensor.Float32).Values
+	expected := tsr.SubSpace(0, 0).(*tensor.Float32).Values
 	assert.Equal(t, expected, dec.Inputs)
 
 	dec.InitPool(2, &layer, 1, IdentityFunc)
 	dec.Input("var0", 0)
-	expected = tsr.SubSpace([]int{0, 1}).(*tensor.Float32).Values
+	expected = tsr.SubSpace(0, 1).(*tensor.Float32).Values
 	assert.Equal(t, expected, dec.Inputs)
 }
 
 func TestInputPool2D(t *testing.T) {
 	dec := Linear{}
-	shape := tensor.NewShape([]int{2, 5, 6, 6})
+	shape := tensor.NewShape(2, 5, 6, 6)
 	vals := make([]float32, shape.Len())
 	for i := range vals {
 		vals[i] = float32(i)
 	}
-	tsr := tensor.NewFloat32(shape.Sizes)
+	tsr := tensor.NewFloat32(shape.Sizes...)
 	for i := range tsr.Values {
 		tsr.Values[i] = vals[i]
 	}
 
-	layer := TestLayer{tensors: map[string]tensor.Tensor{"var0": tsr}}
+	layer := TestLayer{tensors: map[string]tensor.Values{"var0": tsr}}
 	dec.InitPool(2, &layer, 0, IdentityFunc)
 	dec.Input("var0", 0)
-	expected := tsr.SubSpace([]int{0, 0}).(*tensor.Float32).Values
+	expected := tsr.SubSpace(0, 0).(*tensor.Float32).Values
 	assert.Equal(t, expected, dec.Inputs)
 
 	dec.InitPool(2, &layer, 1, IdentityFunc)
 	dec.Input("var0", 0)
-	expected = tsr.SubSpace([]int{0, 1}).(*tensor.Float32).Values
+	expected = tsr.SubSpace(0, 1).(*tensor.Float32).Values
 	assert.Equal(t, expected, dec.Inputs)
 
 	dec.InitPool(2, &layer, 5, IdentityFunc)
 	dec.Input("var0", 0)
-	expected = tsr.SubSpace([]int{1, 0}).(*tensor.Float32).Values
+	expected = tsr.SubSpace(1, 0).(*tensor.Float32).Values
 	assert.Equal(t, expected, dec.Inputs)
 
 	dec.InitPool(2, &layer, 9, IdentityFunc)
 	dec.Input("var0", 0)
-	expected = tsr.SubSpace([]int{1, 4}).(*tensor.Float32).Values
+	expected = tsr.SubSpace(1, 4).(*tensor.Float32).Values
 	assert.Equal(t, expected, dec.Inputs)
 }
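The `SubSpace` coordinates these tests assert follow directly from the pool-index arithmetic in `Linear.Input` shown earlier: `y = PoolIndex / shape.DimSize(1)` and `x = PoolIndex % shape.DimSize(1)`. For the 2x5 pool grid in `TestInputPool2D` (shape `{2, 5, 6, 6}`, so `DimSize(1) == 5`), a short sketch of that mapping:

```go
package main

import "fmt"

func main() {
	// Pool index -> (y, x) as computed in decoder.Linear.Input,
	// for a layer whose pool grid is 2x5 (DimSize(1) == 5).
	nx := 5
	for _, pi := range []int{0, 1, 5, 9} {
		fmt.Printf("pool %d -> SubSpace(%d, %d)\n", pi, pi/nx, pi%nx)
	}
	// Matches the assertions above: 0->(0,0), 1->(0,1), 5->(1,0), 9->(1,4).
}
```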
8 changes: 4 additions & 4 deletions decoder/softmax.go
@@ -15,9 +15,9 @@ import (
 	"path/filepath"
 	"sort"
 
-	"cogentcore.org/core/base/mpi"
 	"cogentcore.org/core/math32"
-	"cogentcore.org/core/tensor"
+	"cogentcore.org/lab/base/mpi"
+	"cogentcore.org/lab/tensor"
 	"github.com/emer/emergent/v2/emer"
 )
 
@@ -93,7 +93,7 @@ func (sm *SoftMax) Init(ncats, ninputs int) {
 	sm.Units = make([]SoftMaxUnit, ncats)
 	sm.Sorted = make([]int, ncats)
 	sm.Inputs = make([]float32, sm.NInputs)
-	sm.Weights.SetShape([]int{sm.NCats, sm.NInputs}, "Cats", "Inputs")
+	sm.Weights.SetShapeSizes(sm.NCats, sm.NInputs)
 	for i := range sm.Weights.Values {
 		sm.Weights.Values[i] = .1
 	}
@@ -215,7 +215,7 @@ func (sm *SoftMax) Back() {
 // MPI version shares weight changes across nodes
 func (sm *SoftMax) BackMPI() {
 	if sm.MPIDWts.Len() != sm.Weights.Len() {
-		sm.MPIDWts.CopyShapeFrom(&sm.Weights)
+		tensor.SetShapeFrom(&sm.MPIDWts, &sm.Weights)
 	}
 	lr := sm.Lrate
 	for ui := range sm.Units {