Fix wrong matrix sizes in layer constructor
This commit is contained in:
parent
627679252f
commit
db04387314
30
layer.h
30
layer.h
@ -1,3 +1,6 @@
|
|||||||
|
#ifndef LAYER_H_
|
||||||
|
#define LAYER_H_
|
||||||
|
|
||||||
#include "matrices.h"
|
#include "matrices.h"
|
||||||
#include <cassert>
|
#include <cassert>
|
||||||
#include <math.h>
|
#include <math.h>
|
||||||
@ -15,8 +18,6 @@ class Layer {
|
|||||||
static inline float Sigmoid(float);
|
static inline float Sigmoid(float);
|
||||||
static inline float SigmoidPrime(float);
|
static inline float SigmoidPrime(float);
|
||||||
|
|
||||||
inline Layer(int); // Number of neurons
|
|
||||||
|
|
||||||
inline void Forward(); // Forward Pass with sigmoid
|
inline void Forward(); // Forward Pass with sigmoid
|
||||||
inline void Forward(float (*activation)(float)); // Forward Pass with custom activation function
|
inline void Forward(float (*activation)(float)); // Forward Pass with custom activation function
|
||||||
|
|
||||||
@ -29,21 +30,22 @@ class Layer {
|
|||||||
};
|
};
|
||||||
|
|
||||||
// Default constructor: leaves every member Matrix default-constructed.
// NOTE(review): no sizes are set here — a Layer built this way must be
// given real matrices before Forward() is called; confirm callers do so.
Layer::Layer(){
}
|
|
||||||
// Sizing constructor.
//
// @param input_size  number of inputs feeding this layer (rows of I)
// @param size        number of neurons in this layer
//
// Shapes follow the forward-pass convention Z = W x I + B:
//   input  I : (input_size x 1)  column vector
//   weights W: (size x input_size) — every neuron has a weight per input
//   raw Z, activated A, biases B: (size x 1) — one value per neuron
Layer::Layer(int input_size, int size){
    // A layer with zero (or negative) inputs or neurons is a bug at the
    // call site — fail fast instead of building degenerate matrices.
    assert(input_size > 0 && size > 0);

    this->input = Matrix(input_size, 1);

    // Every neuron has a weight for every input
    this->weights = Matrix(size, input_size);
    this->weights.Randomize(-1.0F, 1.0F);

    // Z and A are one value per neuron.
    this->raw_output = Matrix(size, 1);
    this->activated_output = this->raw_output;

    // One bias per neuron
    this->biases = Matrix(size, 1);
    this->biases.Randomize(-1.0F, 1.0F);
}
|
||||||
|
|
||||||
void Layer::Feed(Matrix a){
|
void Layer::Feed(Matrix a){
|
||||||
@ -60,9 +62,9 @@ float Layer::SigmoidPrime(float x){
|
|||||||
}
|
}
|
||||||
|
|
||||||
void Layer::Forward(float (*activation)(float)){
|
void Layer::Forward(float (*activation)(float)){
|
||||||
// Multiply inputs by weights
|
// Multiply weight matrix by input matrix
|
||||||
// W x I + B = Z
|
// W x I + B = Z
|
||||||
this->raw_output = this->input.Multiply(&this->weights).Add(&this->biases);
|
this->raw_output = this->weights.Multiply(&this->input).Add(&this->biases);
|
||||||
|
|
||||||
// Now through activation function
|
// Now through activation function
|
||||||
// A = F(Z)
|
// A = F(Z)
|
||||||
@ -71,4 +73,6 @@ void Layer::Forward(float (*activation)(float)){
|
|||||||
|
|
||||||
// Forward pass with the default activation.
// Thin wrapper that delegates to the function-pointer overload,
// passing the class's static Sigmoid.
void Layer::Forward(){
    Forward(&Layer::Sigmoid);
}
|
||||||
|
|
||||||
|
#endif
|
Loading…
x
Reference in New Issue
Block a user