unexport network features (neurons, weights, biases)

Adrien Marquès 2018-10-15 06:54:38 +02:00
parent a180e09cf0
commit eee503a405
3 changed files with 62 additions and 54 deletions
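The rename below turns the three exported matrix slices (Neurons, Biases, Weights) into package-private fields (neurons, biases, weights), so code outside the package loses direct access to the network's internal state and must go through the exported API visible in this diff (Empty, Guess, Train, Cost, Output). A minimal caller-side sketch of the difference; the import path and input values are hypothetical:

    package main

    import (
    	"fmt"

    	nn "example.com/network" // hypothetical import path for this package
    )

    func main() {
    	// Empty builds a randomly initialised network from layer sizes.
    	net, err := nn.Empty(2, 3, 1)
    	if err != nil {
    		panic(err)
    	}

    	// net.Neurons[0].Set(0, 0, 0.5) // compiled before this commit; now 'neurons' is unexported

    	out, err := net.Guess(0.5, 0.1) // exported entry point: feeds the input forward, returns the output layer
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(out)
    }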

View File

@@ -11,9 +11,9 @@ type Network struct {
 	fed     bool         // whether the network has the state fed by a forward pass
-	Neurons []*mat.Dense // neuron value vector for each layer (size=L)
-	Biases  []*mat.Dense // neuron bias vector for each layer (size=L-1)
-	Weights []*mat.Dense // weights between each 2-layers (size=L-1)
+	neurons []*mat.Dense // neuron value vector for each layer (size=L)
+	biases  []*mat.Dense // neuron bias vector for each layer (size=L-1)
+	weights []*mat.Dense // weights between each 2-layers (size=L-1)
 }
 
 const MaxLayerCount = 255
@@ -34,9 +34,9 @@ func Empty(_layers ...uint) (*Network, error) {
 	net := &Network{
 		layers: _layers,
 		fed:    false,
-		Neurons: make([]*mat.Dense, 0),
-		Biases:  make([]*mat.Dense, 0),
-		Weights: make([]*mat.Dense, 0),
+		neurons: make([]*mat.Dense, 0),
+		biases:  make([]*mat.Dense, 0),
+		weights: make([]*mat.Dense, 0),
 	}
 
 	for i, layer := range _layers {
@@ -47,7 +47,7 @@ func Empty(_layers ...uint) (*Network, error) {
 		}
 
 		// create neurons
-		net.Neurons = append(net.Neurons, mat.NewDense(int(layer), 1, nil))
+		net.neurons = append(net.neurons, mat.NewDense(int(layer), 1, nil))
 
 		// do not create weights nor biases for first layer
 		// (no previous layer to bound to)
@@ -62,7 +62,7 @@ func Empty(_layers ...uint) (*Network, error) {
 			biases = append(biases, rand.Float64())
 		}
 		biasesVec := mat.NewDense(int(layer), 1, biases)
-		net.Biases = append(net.Biases, biasesVec)
+		net.biases = append(net.biases, biasesVec)
 
 		rows, cols := int(layer), int(_layers[i-1])
 		weights := make([]float64, 0, rows*cols+1)
@@ -71,7 +71,7 @@ func Empty(_layers ...uint) (*Network, error) {
 			weights = append(weights, rand.Float64())
 		}
 		weightsMat := mat.NewDense(rows, cols, weights)
-		net.Weights = append(net.Weights, weightsMat)
+		net.weights = append(net.weights, weightsMat)
 	}
@@ -83,8 +83,8 @@ func Empty(_layers ...uint) (*Network, error) {
 func (net *Network) reset() {
 	net.fed = false
-	for i, _ := range net.Neurons {
-		net.Neurons[i] = mat.NewDense(int(net.layers[i]), 1, nil)
+	for i, _ := range net.neurons {
+		net.neurons[i] = mat.NewDense(int(net.layers[i]), 1, nil)
 	}
 }
@@ -93,26 +93,26 @@ func (net *Network) reset() {
 func (net *Network) forward(_input ...float64) error {
 	// check input size
-	if len(_input) < net.Neurons[0].ColView(0).Len() {
+	if len(_input) < net.neurons[0].ColView(0).Len() {
 		return ErrMissingInput
 	}
 
 	// reset neuron values
 	net.reset()
 
 	// forward input to first layer
-	for n, l := 0, net.Neurons[0].ColView(0).Len(); n < l; n++ {
-		net.Neurons[0].Set(n, 0, _input[n])
+	for n, l := 0, net.neurons[0].ColView(0).Len(); n < l; n++ {
+		net.neurons[0].Set(n, 0, _input[n])
 	}
 
 	// process each layer from the previous one
 	for l, ll := 1, len(net.layers); l < ll; l++ {
 		// Z = w^l . a^(l-1) + b^l
-		z := net.Neurons[l]
-		a := net.Neurons[l-1] // neurons of previous layer
-		w := net.Weights[l-1] // shifted by 1 because no weights between layers -1 and 0
-		b := net.Biases[l-1]  // same shift as weights
+		z := net.neurons[l]
+		a := net.neurons[l-1] // neurons of previous layer
+		w := net.weights[l-1] // shifted by 1 because no weights between layers -1 and 0
+		b := net.biases[l-1]  // same shift as weights
 		z.Mul(w, a)
 		z.Add(z, b)
@@ -140,7 +140,11 @@ func (net *Network) Cost(_expect ...float64) (float64, error) {
 // from the given _expect data
 func (net *Network) costVec(_expect ...float64) (*mat.Dense, error) {
-	out := net.Neurons[len(net.Neurons)-1]
+	if !net.fed {
+		return nil, ErrNoState
+	}
+
+	out := net.neurons[len(net.neurons)-1]
 
 	// check output size
 	if len(_expect) < out.ColView(0).Len() {
@@ -163,7 +167,11 @@ func (net *Network) costVec(_expect ...float64) (*mat.Dense, error) {
 // output (as a vector) from the given _expect data
 func (net *Network) errorVec(_expect ...float64) (*mat.Dense, error) {
-	outLayer := net.Neurons[len(net.Neurons)-1]
+	if !net.fed {
+		return nil, ErrNoState
+	}
+
+	outLayer := net.neurons[len(net.neurons)-1]
 
 	// check output size
 	if len(_expect) < outLayer.ColView(0).Len() {
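The two hunks above add the same fail-fast guard to costVec and errorVec: both now return ErrNoState instead of reading a zeroed output layer when no forward pass has run. A sketch of the call-order contract this enforces on the exported side, continuing the hypothetical main() of the caller sketch at the top of this page and assuming Guess performs the forward pass (its use alongside Cost here suggests as much):

    	if _, err := net.Cost(1.0); err != nil {
    		fmt.Println(err) // ErrNoState: nothing has been fed forward yet
    	}
    	if _, err := net.Guess(0.5, 0.1); err != nil { // feeds the state forward
    		panic(err)
    	}
    	cost, err := net.Cost(1.0) // valid now that the network is fed
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("cost: %f\n", cost)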
@@ -186,7 +194,7 @@ func (net *Network) errorVec(_expect ...float64) (*mat.Dense, error) {
 // and the expected data : _expect
 func (net *Network) backward(_expect ...float64) error {
-	out := net.Neurons[len(net.Neurons)-1]
+	out := net.neurons[len(net.neurons)-1]
 
 	// 1. fail on no state (no forward pass applied first)
 	if !net.fed {
@@ -208,10 +216,10 @@ func (net *Network) backward(_expect ...float64) error {
 	// FOR EACH LAYER (from last to 1)
 	for l := len(net.layers) - 1; l > 0; l-- {
-		neurons := net.Neurons[l]
-		previous := net.Neurons[l-1]
-		weights := net.Weights[l-1] // from l-1 to l
-		biases := net.Biases[l-1]   // at l
+		neurons := net.neurons[l]
+		previous := net.neurons[l-1]
+		weights := net.weights[l-1] // from l-1 to l
+		biases := net.biases[l-1]   // at l
 
 		// calc GRADIENTS = sigmoid'( neuron[l-1] )
 		gradients := new(mat.Dense)
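The gradients comment above, together with the sigmoid(0, 0, sum) call in the test file below, implies an activation helper with the func(i, j int, v float64) float64 shape that gonum's (*mat.Dense).Apply expects. A sketch of what that pair plausibly looks like; the repository's actual definitions are not part of this diff:

    package main

    import (
    	"fmt"
    	"math"

    	"gonum.org/v1/gonum/mat"
    )

    // sigmoid matches the func(i, j int, v float64) float64 signature that
    // (*mat.Dense).Apply expects; the row/column indices are unused.
    func sigmoid(_, _ int, v float64) float64 {
    	return 1 / (1 + math.Exp(-v))
    }

    // sigmoidPrime is the derivative sigma'(v) = sigma(v) * (1 - sigma(v)),
    // the factor used when computing the gradients.
    func sigmoidPrime(i, j int, v float64) float64 {
    	s := sigmoid(i, j, v)
    	return s * (1 - s)
    }

    func main() {
    	z := mat.NewDense(2, 1, []float64{1.9, 3.7})
    	a := new(mat.Dense)
    	a.Apply(sigmoid, z) // element-wise activation of the layer
    	g := new(mat.Dense)
    	g.Apply(sigmoidPrime, z) // element-wise gradient factors
    	fmt.Println(mat.Formatted(a), mat.Formatted(g))
    }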
@@ -258,7 +266,7 @@ func (net *Network) Guess(_input ...float64) ([]float64, error) {
 // to guess the _expect instead
 func (net *Network) Train(_input []float64, _expect []float64) error {
-	out := net.Neurons[len(net.Neurons)-1]
+	out := net.neurons[len(net.neurons)-1]
 
 	// check output size
 	if len(_expect) != out.ColView(0).Len() {
@@ -283,7 +291,7 @@ func (net Network) Output() ([]float64, error) {
 		return nil, ErrNoState
 	}
 
-	out := net.Neurons[len(net.Neurons)-1]
+	out := net.neurons[len(net.neurons)-1]
 	output := make([]float64, 0, net.layers[len(net.layers)-1])
 	for n, l := 0, out.ColView(0).Len(); n < l; n++ {
 		output = append(output, out.At(n, 0))

View File

@@ -67,31 +67,31 @@ func TestEmptyNetworkSizes(t *testing.T) {
 		}
 
 		// 1. Check neuron layer count
-		if len(net.Neurons) != len(test) {
-			t.Errorf("Expected %d layers of neurons, got %d", len(test), len(net.Neurons))
+		if len(net.neurons) != len(test) {
+			t.Errorf("Expected %d layers of neurons, got %d", len(test), len(net.neurons))
 			continue
 		}
 
 		// 2. Check bias layer count (layers-1)
-		if len(net.Biases) != len(test)-1 {
-			t.Errorf("Expected %d layers of biases, got %d", len(test)-1, len(net.Biases))
+		if len(net.biases) != len(test)-1 {
+			t.Errorf("Expected %d layers of biases, got %d", len(test)-1, len(net.biases))
 			continue
 		}
 
 		// 3. Check weights layer count (layers-1)
-		if len(net.Weights) != len(test)-1 {
-			t.Errorf("Expected %d layers of weights, got %d", len(test)-1, len(net.Weights))
+		if len(net.weights) != len(test)-1 {
+			t.Errorf("Expected %d layers of weights, got %d", len(test)-1, len(net.weights))
 			continue
 		}
 
 		// 4. Check each neuron layer count
-		for n, neuron := range net.Neurons {
+		for n, neuron := range net.neurons {
 			if uint(neuron.ColView(0).Len()) != test[n] {
 				t.Errorf("Expected %d neurons on layer %d, got %d", test[n], n, neuron.ColView(0).Len())
 			}
 		}
 
 		// 5. Check each bias layer count
-		for b, bias := range net.Biases {
+		for b, bias := range net.biases {
 			if uint(bias.ColView(0).Len()) != test[b+1] {
 				t.Errorf("Expected %d biases on layer %d, got %d", test[b+1], b, bias.ColView(0).Len())
@@ -100,7 +100,7 @@ func TestEmptyNetworkSizes(t *testing.T) {
 		}
 
 		// 6. Check each weight layer count
-		for w, weight := range net.Weights {
+		for w, weight := range net.weights {
 			rows, cols := weight.Dims()
@@ -151,19 +151,19 @@ func TestForwardPass(t *testing.T) {
 	for l, ll := 1, len(net.layers); l < ll; l++ {
 		// each neuron = ( each previous neuron times its weight ) + neuron bias
-		for n, nl := 0, net.Neurons[l].ColView(0).Len(); n < nl; n++ {
-			sum := net.Biases[l-1].At(n, 0)
+		for n, nl := 0, net.neurons[l].ColView(0).Len(); n < nl; n++ {
+			sum := net.biases[l-1].At(n, 0)
 
 			// sum each previous neuron*its weight
-			for i, il := 0, net.Neurons[l-1].ColView(0).Len(); i < il; i++ {
-				sum += net.Neurons[l-1].At(i, 0) * net.Weights[l-1].At(n, i)
+			for i, il := 0, net.neurons[l-1].ColView(0).Len(); i < il; i++ {
+				sum += net.neurons[l-1].At(i, 0) * net.weights[l-1].At(n, i)
 			}
 			sum = sigmoid(0, 0, sum)
 
 			// check sum
-			if !floats.EqualWithinAbs(net.Neurons[l].At(n, 0), sum, 1e9) {
-				t.Fatalf("Expected neuron %d.%d to be %f, got %f", l, n, sum, net.Neurons[l].At(n, 0))
+			if !floats.EqualWithinAbs(net.neurons[l].At(n, 0), sum, 1e9) {
+				t.Fatalf("Expected neuron %d.%d to be %f, got %f", l, n, sum, net.neurons[l].At(n, 0))
 			}
 		}
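One detail worth flagging in the context lines above: gonum's floats.EqualWithinAbs(a, b, tol) reports whether |a - b| <= tol, so a tolerance of 1e9 makes the assertion effectively impossible to fail; 1e-9 looks like the intended value. A quick illustration:

    package main

    import (
    	"fmt"

    	"gonum.org/v1/gonum/floats"
    )

    func main() {
    	fmt.Println(floats.EqualWithinAbs(1.0, 2.0, 1e9))        // true: anything passes at tol=1e9
    	fmt.Println(floats.EqualWithinAbs(1.0, 1.0+1e-12, 1e-9)) // true: what the test presumably means
    	fmt.Println(floats.EqualWithinAbs(1.0, 1.1, 1e-9))       // false: a real mismatch is caught
    }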

View File

@@ -24,7 +24,7 @@ func (net *Network) MarshalJSON() ([]byte, error) {
 	// 2. Biases
 	raw.Biases = make([][]float64, 0)
-	for _, bias := range net.Biases {
+	for _, bias := range net.biases {
 		vector := bias.ColView(0)
 		biasJSON := make([]float64, 0)
@@ -38,7 +38,7 @@ func (net *Network) MarshalJSON() ([]byte, error) {
 	// 3. Weights
 	raw.Weights = make([][]float64, 0)
-	for _, weight := range net.Weights {
+	for _, weight := range net.weights {
 		rows, cols := weight.Dims()
 		weightJSON := make([]float64, 0)
@@ -70,28 +70,28 @@ func (net *Network) UnmarshalJSON(in []byte) error {
 	net.layers = raw.Layers
 
 	// extract biases
-	net.Biases = make([]*mat.Dense, 0)
+	net.biases = make([]*mat.Dense, 0)
 	for l, layer := range net.layers {
 		if l == 0 {
 			continue
 		}
-		net.Biases = append(net.Biases, mat.NewDense(int(layer), 1, raw.Biases[l-1]))
+		net.biases = append(net.biases, mat.NewDense(int(layer), 1, raw.Biases[l-1]))
 	}
 
 	// extract weights
-	net.Weights = make([]*mat.Dense, 0)
+	net.weights = make([]*mat.Dense, 0)
 	for l, layer := range net.layers {
 		if l == 0 {
 			continue
 		}
-		net.Weights = append(net.Weights, mat.NewDense(int(layer), int(net.layers[l-1]), raw.Weights[l-1]))
+		net.weights = append(net.weights, mat.NewDense(int(layer), int(net.layers[l-1]), raw.Weights[l-1]))
 	}
 
 	// mockup neurons
-	net.Neurons = make([]*mat.Dense, 0)
+	net.neurons = make([]*mat.Dense, 0)
 	for _, layer := range net.layers {
-		net.Neurons = append(net.Neurons, mat.NewDense(int(layer), 1, nil))
+		net.neurons = append(net.neurons, mat.NewDense(int(layer), 1, nil))
 	}
-	fmt.Printf("neurons: %v\n", net.Neurons)
+	fmt.Printf("neurons: %v\n", net.neurons)
 
 	// extract into the current network receiver (net)
 	return nil
@@ -130,9 +130,9 @@ func (net *Network) ReadFrom(r io.Reader) (int64, error) {
 	// copy values
 	net.layers = readNet.layers
 	net.fed = readNet.fed
-	net.Neurons = readNet.Neurons
-	net.Biases = readNet.Biases
-	net.Weights = readNet.Weights
+	net.neurons = readNet.neurons
+	net.biases = readNet.biases
+	net.weights = readNet.weights
 
 	return int64(len(raw)), nil
 }