vik 2024-11-28 19:44:23 -06:00
parent 95ad0ca8c4
commit 8808242715
3 changed files with 23 additions and 4 deletions

Binary file added (image, 311 KiB; not shown)

Binary file added (image, 322 KiB; not shown)

snn.c (27 changes)

@@ -3,6 +3,8 @@
 #include <math.h>
 #include <assert.h>
 #include <gsl/gsl_matrix.h>
+#include <gsl/gsl_blas.h>
+#include <gsl/gsl_cblas.h>
 #define ALPHA 0.2
@@ -61,13 +63,30 @@ void forwardprop(Layer* layer) {
     }
 }
-void cost(Layer* layer, gsl_matrix* expected) {
-    // (for mnist at least) your expected will be a matrix of [10x1]
-    assert(layer->values->size1 == expected->size1);
+double matrixsum(gsl_matrix* matrix) {
+    double result = 0; // accumulator must start at zero
+    for (unsigned int i = 0; i < matrix->size1; i++) {
+        for (unsigned int j = 0; j < matrix->size2; j++) {
+            result += gsl_matrix_get(matrix, i, j);
+        }
+    }
+    return result;
+}
+double cost(Layer* layer, gsl_matrix* expected) {
+    // mean squared error
+    // (for MNIST at least) expected will be a [10x1] matrix
+    // call this only on the output layer
+    assert(layer->values->size1 == expected->size1);
+    gsl_matrix* result = gsl_matrix_alloc(expected->size1, 1);
+    gsl_matrix_memcpy(result, layer->values);
+    gsl_matrix_sub(result, expected);        // result = values - expected
+    gsl_matrix_mul_elements(result, result); // square each element in place
+    double matsum = matrixsum(result);
+    gsl_matrix_free(result); // release the scratch matrix
+    return ((double)1 / layer->neurons) * matsum;
 }
 void backprop(Layer* layer) {
     assert(layer->previous != NULL);
 }
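
For context, the new cost() computes mean squared error over the output layer: MSE = (1/n) * sum_i (value_i - expected_i)^2. Below is a minimal sketch of how it might be exercised, assuming the Layer struct in snn.c exposes values (a gsl_matrix*), neurons, and previous as the diff implies; the struct stand-in and driver are illustrative, not part of the repo:

#include <stdio.h>
#include <gsl/gsl_matrix.h>

/* Hypothetical stand-in for the Layer struct in snn.c; only the
   fields cost() touches are assumed here. */
typedef struct Layer {
    gsl_matrix* values;
    int neurons;
    struct Layer* previous;
} Layer;

double cost(Layer* layer, gsl_matrix* expected); /* from snn.c */

int main(void) {
    /* 10x1 output vector, as for MNIST */
    Layer out = { gsl_matrix_calloc(10, 1), 10, NULL };
    gsl_matrix* expected = gsl_matrix_calloc(10, 1);
    gsl_matrix_set(expected, 3, 0, 1.0);   /* one-hot target: class 3 */
    gsl_matrix_set(out.values, 3, 0, 0.8); /* network output for class 3 */

    /* (1/10) * (0.8 - 1.0)^2 = 0.004 */
    printf("cost = %f\n", cost(&out, expected));

    gsl_matrix_free(out.values);
    gsl_matrix_free(expected);
    return 0;
}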