// neuralnet/network.go

package nn

import (
	"fmt"
	"math"
	"math/rand"
	"time"

	"gonum.org/v1/gonum/mat"
)

// Network is a feed-forward neural network: layers of neurons, with biases
// and weights connecting each layer to the previous one.
type Network struct {
	layers  []uint          // number of neurons in each layer
	fed     bool            // whether a forward pass has populated the neuron values
	Neurons []*mat.VecDense // neuron value vector for each layer (size=L)
	Biases  []*mat.VecDense // neuron bias vector for each layer (size=L-1)
	Weights []*mat.Dense    // weight matrix between each pair of consecutive layers (size=L-1)
}
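
// Shapes (illustrative): a network built with Empty(2, 3, 1) holds Neurons of
// lengths 2, 3 and 1, Biases of lengths 3 and 1, and Weights of shapes 3x2 and
// 1x3, so that Weights[l].At(cur, prev) is the weight from neuron prev of
// layer l to neuron cur of layer l+1.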

const (
	MaxLayerCount = 255
	LearningRate  = 1
)

// Empty creates a new, randomly initialized network. Each argument gives the
// number of neurons of one layer, in order, including the input and output layers.
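//
// A minimal usage sketch (illustrative; assuming the package is imported as nn):
//
//	net, err := nn.Empty(2, 3, 1) // 2 inputs, one hidden layer of 3, 1 output
//	if err != nil {
//		// handle ErrMissingLayers, ErrTooMuchLayers or ErrEmptyLayer
//	}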
func Empty(_layers ...uint) (*Network, error) {
	var L = len(_layers)
	// check layer count
	if L < 2 {
		return nil, ErrMissingLayers
	} else if L > MaxLayerCount {
		return nil, ErrTooMuchLayers
	}
	// seed the generator once; reseeding inside the loops below would be
	// slow and could yield correlated values
	rand.Seed(time.Now().UnixNano())
	net := &Network{
		layers:  _layers,
		fed:     false,
		Neurons: make([]*mat.VecDense, 0, L),
		Biases:  make([]*mat.VecDense, 0, L-1),
		Weights: make([]*mat.Dense, 0, L-1),
	}
	for i, layer := range _layers {
		// check neuron count
		if layer < 1 {
			return nil, ErrEmptyLayer
		}
		// create neurons
		net.Neurons = append(net.Neurons, mat.NewVecDense(int(layer), nil))
		// do not create weights nor biases for the first layer
		// (no previous layer to bind to)
		if i == 0 {
			continue
		}
		// create random biases for this layer
		biases := make([]float64, 0, layer)
		for b := uint(0); b < layer; b++ {
			biases = append(biases, rand.Float64())
		}
		net.Biases = append(net.Biases, mat.NewVecDense(int(layer), biases))
		// create random weights between this layer and the previous one
		rows, cols := int(layer), int(_layers[i-1])
		weights := make([]float64, 0, rows*cols)
		for v := 0; v < rows*cols; v++ {
			weights = append(weights, rand.Float64())
		}
		net.Weights = append(net.Weights, mat.NewDense(rows, cols, weights))
	}
	return net, nil
}

// reset clears all neuron values
func (net *Network) reset() {
	net.fed = false
	for i := range net.Neurons {
		net.Neurons[i] = mat.NewVecDense(int(net.layers[i]), nil)
	}
}

// Forward runs a forward pass from the input vector _input and leaves the
// network in the resulting state (required before calling Backward).
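//
// A minimal usage sketch (illustrative):
//
//	out, err := net.Forward(0.5, 0.1) // one value per input neuron
//	if err == nil {
//		fmt.Println(out) // one value per output neuron
//	}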
func (net *Network) Forward(_input ...float64) ([]float64, error) {
	// check input size
	if len(_input) < net.Neurons[0].Len() {
		return nil, ErrMissingInput
	}
	// reset neuron values
	net.reset()
	// forward input to first layer
	for n, l := 0, net.Neurons[0].Len(); n < l; n++ {
		net.Neurons[0].SetVec(n, _input[n])
	}
	// process each layer from the previous one
	for l, ll := 1, len(net.layers); l < ll; l++ {
		// z^l = w^l · a^(l-1) + b^l, then a^l = sigmoid(z^l)
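		// Dimensions (illustrative): with layers (2, 3, 1) and l = 1,
		// w is 3x2, a is 2x1 and b is 3x1, so z comes out as 3x1.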
		z := new(mat.Dense)
		a := net.Neurons[l-1] // neurons of the previous layer
		w := net.Weights[l-1] // shifted by 1: no weights into layer 0
		b := net.Biases[l-1]  // same shift as the weights
		z.Mul(w, a)
		z.Add(z, b)
		// apply the activation element-wise; sigmoid must match gonum's
		// Apply signature func(i, j int, v float64) float64
		z.Apply(sigmoid, z)
		// copy the values (z is a one-column matrix: its only column is a^l)
		net.Neurons[l].CloneVec(z.ColView(0))
	}
	net.fed = true
	// format output
	outputLayer := net.Neurons[len(net.Neurons)-1]
	output := make([]float64, 0, net.layers[len(net.layers)-1])
	for n, l := 0, outputLayer.Len(); n < l; n++ {
		output = append(output, outputLayer.AtVec(n))
	}
	return output, nil
}

// Cost returns the cost of the current network state against the expected
// output values _expect.
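//
// The cost implemented below is the squared error, scaled by LearningRate:
//
//	C = LearningRate * Σ_n (out_n - expect_n)²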
func (net *Network) Cost(_expect ...float64) (float64, error) {
	outputLayer := net.Neurons[len(net.Neurons)-1]
	// check output size
	if len(_expect) < outputLayer.Len() {
		return 0, ErrMissingOutput
	}
	var cost float64
	// process cost
	for n, l := 0, outputLayer.Len(); n < l; n++ {
		cost += math.Pow(outputLayer.AtVec(n)-_expect[n], 2) * LearningRate
	}
	return cost, nil
}

// CostDerVec returns the cost derivative of each output neuron (as a vector)
// from the expected data _expect.
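//
// For the squared error used by Cost, the derivative with respect to output
// neuron n is
//
//	∂C/∂out_n = LearningRate * 2 * (out_n - expect_n)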
func (net *Network) CostDerVec(_expect ...float64) (*mat.VecDense, error) {
	outLayer := net.Neurons[len(net.Neurons)-1]
	// check output size
	if len(_expect) < outLayer.Len() {
		return nil, ErrMissingOutput
	}
	cost := mat.NewVecDense(outLayer.Len(), nil)
	// process the cost derivative of each output neuron; iterate over the
	// layer length so extra _expect values are ignored
	for n, l := 0, outLayer.Len(); n < l; n++ {
		cost.SetVec(n, LearningRate*2*(outLayer.AtVec(n)-_expect[n]))
	}
	return cost, nil
}

// Backward runs backpropagation from the current network state against the
// expected output values _expect, updating all weights and biases once.
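//
// A minimal training-loop sketch (illustrative; error handling omitted):
//
//	net, _ := nn.Empty(2, 3, 1)
//	for i := 0; i < 1000; i++ {
//		net.Forward(0.5, 0.1) // feed one value per input neuron
//		net.Backward(1)       // one expected value per output neuron
//	}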
func (net *Network) Backward(_expect ...float64) error {
	// 0. fail on no state (no forward pass applied first)
	if !net.fed {
		return ErrNoState
	}
	// 1. prepare a receiver network of the same shape to hold the gradients
	delta, err := Empty(net.layers...)
	if err != nil {
		return err
	}
	// 2. get the cost derivative
	cost, err := net.CostDerVec(_expect...)
	if err != nil {
		return err
	}
	// replace the delta output layer values with the cost derivative
	deltaOutLayer := delta.Neurons[len(delta.Neurons)-1]
	for n, nl := 0, deltaOutLayer.Len(); n < nl; n++ {
		deltaOutLayer.SetVec(n, cost.AtVec(n))
	}
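	// For reference, the chain rule driving step 3 below (with a = sigmoid(z)):
	//   ∂C/∂b_cur      = sigmoid'(z_cur) * ∂C/∂a_cur
	//   ∂C/∂w_cur,prev = a_prev * sigmoid'(z_cur) * ∂C/∂a_cur
	//   ∂C/∂a_prev     = Σ_cur w_cur,prev * sigmoid'(z_cur) * ∂C/∂a_cur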
	// 3. walk the layers backwards (the input layer has no biases/weights)
	for l := len(net.layers) - 1; l > 0; l-- {
		// process weights/biases between layer l and layer l-1
		for prev := 0; prev < int(net.layers[l-1]); prev++ {
			// accumulate the cost derivative of the previous layer's neuron
			prevCostDer := float64(0)
			for cur := 0; cur < int(net.layers[l]); cur++ {
				sigmoidDer := sigmoidToDerivative(net.Neurons[l].AtVec(cur))
				curCostDer := delta.Neurons[l].AtVec(cur)
				// bias gradient = sigmoid' * (cost derivative of current neuron);
				// compute it only once per cur, not once per prev
				if prev == 0 {
					delta.Biases[l-1].SetVec(cur, sigmoidDer*curCostDer)
				}
				// weight gradient = a^prev * sigmoid' * (cost derivative of current neuron)
				weight := net.Neurons[l-1].AtVec(prev)
				weight *= sigmoidDer
				weight *= curCostDer
				delta.Weights[l-1].Set(cur, prev, weight)
				// propagate: sum the *network* weight (not its gradient)
				// times sigmoid' times the current neuron's cost derivative
				prevCostDer += net.Weights[l-1].At(cur, prev) * sigmoidDer * curCostDer
			}
			// store the previous layer neuron's cost derivative
			delta.Neurons[l-1].SetVec(prev, prevCostDer)
		}
	}
	// 4. Apply the computed gradients
	// each bias
	for b, bias := range net.Biases {
		bias.SubVec(bias, delta.Biases[b])
	}
	// each weight
	for w, weight := range net.Weights {
		weight.Sub(weight, delta.Weights[w])
	}
	// debug output: each output neuron value with its cost derivative and target
	outLayer := net.Neurons[len(net.Neurons)-1]
	for i, l := 0, outLayer.Len(); i < l; i++ {
		fmt.Printf("[out.%d] value=%f deriv=%f expect=%f\n", i, outLayer.AtVec(i), deltaOutLayer.AtVec(i), _expect[i])
	}
	return nil
}