author    3gg <3gg@shellblade.net>  2023-11-23 08:38:59 -0800
committer 3gg <3gg@shellblade.net>  2023-11-23 08:38:59 -0800
commit    6ca8a31143f087f3bc470d39eb3c00156443802a (patch)
tree      8a7462d28e75d0cfc4eff323f0b83ff12c6dc860
parent    041613467a0915e6ec07cdab0ca3e7b8d757fe5f (diff)

Formatting.
-rw-r--r--  src/lib/include/neuralnet/matrix.h     |  15
-rw-r--r--  src/lib/include/neuralnet/neuralnet.h  |   8
-rw-r--r--  src/lib/include/neuralnet/train.h      |  20
-rw-r--r--  src/lib/src/activation.h               |  12
-rw-r--r--  src/lib/src/matrix.c                   |  82
-rw-r--r--  src/lib/src/neuralnet.c                |  65
-rw-r--r--  src/lib/src/neuralnet_impl.h           |  12
-rw-r--r--  src/lib/src/train.c                    | 236
8 files changed, 237 insertions, 213 deletions
diff --git a/src/lib/include/neuralnet/matrix.h b/src/lib/include/neuralnet/matrix.h
index 0cb40cf..b7281bf 100644
--- a/src/lib/include/neuralnet/matrix.h
+++ b/src/lib/include/neuralnet/matrix.h
@@ -33,7 +33,8 @@ void nnMatrixToArray(const nnMatrix* in, R* out);
 void nnMatrixRowToArray(const nnMatrix* in, int row, R* out);
 
 /// Copy a column from a source to a target matrix.
-void nnMatrixCopyCol(const nnMatrix* in, nnMatrix* out, int col_in, int col_out);
+void nnMatrixCopyCol(
+    const nnMatrix* in, nnMatrix* out, int col_in, int col_out);
 
 /// Mutable borrow of a matrix.
 nnMatrix nnMatrixBorrow(nnMatrix* in);
@@ -56,20 +57,24 @@ void nnMatrixMul(const nnMatrix* left, const nnMatrix* right, nnMatrix* out);
 ///
 /// This function multiplies two matrices row-by-row instead of row-by-column.
 /// nnMatrixMul(A, B, O) == nnMatrixMulRows(A, B^T, O).
-void nnMatrixMulRows(const nnMatrix* left, const nnMatrix* right, nnMatrix* out);
+void nnMatrixMulRows(
+    const nnMatrix* left, const nnMatrix* right, nnMatrix* out);
 
 /// Matrix multiply-add.
 ///
 /// out = left + (right * scale)
-void nnMatrixMulAdd(const nnMatrix* left, const nnMatrix* right, R scale, nnMatrix* out);
+void nnMatrixMulAdd(
+    const nnMatrix* left, const nnMatrix* right, R scale, nnMatrix* out);
 
 /// Matrix multiply-subtract.
 ///
 /// out = left - (right * scale)
-void nnMatrixMulSub(const nnMatrix* left, const nnMatrix* right, R scale, nnMatrix* out);
+void nnMatrixMulSub(
+    const nnMatrix* left, const nnMatrix* right, R scale, nnMatrix* out);
 
 /// Hadamard product of two matrices.
-void nnMatrixMulPairs(const nnMatrix* left, const nnMatrix* right, nnMatrix* out);
+void nnMatrixMulPairs(
+    const nnMatrix* left, const nnMatrix* right, nnMatrix* out);
 
 /// Add two matrices.
 void nnMatrixAdd(const nnMatrix* left, const nnMatrix* right, nnMatrix* out);
diff --git a/src/lib/include/neuralnet/neuralnet.h b/src/lib/include/neuralnet/neuralnet.h
index 1cf1c53..05c9406 100644
--- a/src/lib/include/neuralnet/neuralnet.h
+++ b/src/lib/include/neuralnet/neuralnet.h
@@ -5,7 +5,7 @@
 typedef struct nnMatrix nnMatrix;
 
 typedef struct nnNeuralNetwork nnNeuralNetwork;
 typedef struct nnQueryObject nnQueryObject;
 
 /// Neuron activation.
 typedef enum nnActivation {
@@ -15,7 +15,8 @@ typedef enum nnActivation {
 } nnActivation;
 
 /// Create a network.
-nnNeuralNetwork* nnMakeNet(int num_layers, const int* layer_sizes, const nnActivation* activations);
+nnNeuralNetwork* nnMakeNet(
+    int num_layers, const int* layer_sizes, const nnActivation* activations);
 
 /// Delete the network and free its internal memory.
 void nnDeleteNet(nnNeuralNetwork**);
@@ -36,7 +37,8 @@ void nnSetBiases(nnNeuralNetwork*, const R* biases);
 void nnQuery(const nnNeuralNetwork*, nnQueryObject*, const nnMatrix* input);
 
 /// Query the network, array version.
-void nnQueryArray(const nnNeuralNetwork*, nnQueryObject*, const R* input, R* output);
+void nnQueryArray(
+    const nnNeuralNetwork*, nnQueryObject*, const R* input, R* output);
 
 /// Create a query object.
 ///
diff --git a/src/lib/include/neuralnet/train.h b/src/lib/include/neuralnet/train.h
index 79f8e7b..6d811c2 100644
--- a/src/lib/include/neuralnet/train.h
+++ b/src/lib/include/neuralnet/train.h
@@ -14,18 +14,18 @@ typedef struct nnMatrix nnMatrix;
 /// activation with many inputs. Thus, a (0,1) initialization is really
 /// (0,scale), for example.
 typedef enum nnWeightInitStrategy {
   nnWeightInit01,     // (0,1) range.
   nnWeightInit11,     // (-1,+1) range.
   nnWeightInitNormal, // Normal distribution.
 } nnWeightInitStrategy;
 
 /// Network training parameters.
 typedef struct nnTrainingParams {
   R learning_rate;
   int max_iterations;
   uint64_t seed;
   nnWeightInitStrategy weight_init;
   bool debug;
 } nnTrainingParams;
 
 /// Train the network.
@@ -36,7 +36,5 @@ typedef struct nnTrainingParams {
 /// |targets| is a matrix of targets, one row per target and as many columns as
 /// the target's dimension.
 void nnTrain(
-    nnNeuralNetwork*,
-    const nnMatrix* inputs,
-    const nnMatrix* targets,
+    nnNeuralNetwork*, const nnMatrix* inputs, const nnMatrix* targets,
     const nnTrainingParams*);
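
Note: a sketch of how a caller fills these parameters and invokes nnTrain, using only names visible in this header. The numeric values are placeholders, not recommendations from the repository:

    #include <neuralnet/train.h>

    #include <stdbool.h>

    static void train_sketch(
        nnNeuralNetwork* net, const nnMatrix* inputs, const nnMatrix* targets) {
      const nnTrainingParams params = {
          .learning_rate  = 0.1,
          .max_iterations = 10000,
          .seed           = 42,
          .weight_init    = nnWeightInitNormal,
          .debug          = false,
      };
      nnTrain(net, inputs, targets, &params);
    }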
diff --git a/src/lib/src/activation.h b/src/lib/src/activation.h
index 42ab73f..b56a69e 100644
--- a/src/lib/src/activation.h
+++ b/src/lib/src/activation.h
@@ -4,17 +4,13 @@
 
 #include <math.h>
 
-static inline R sigmoid(R x) {
-  return 1. / (1. + exp(-x));
-}
+static inline R sigmoid(R x) { return 1. / (1. + exp(-x)); }
 
-static inline R relu(R x) {
-  return fmax(0, x);
-}
+static inline R relu(R x) { return fmax(0, x); }
 
 #define NN_MAP_ARRAY(f, in, out, size) \
   for (int i = 0; i < size; ++i) {     \
     out[i] = f(in[i]);                 \
   }
 
 #define sigmoid_array(in, out, size) NN_MAP_ARRAY(sigmoid, in, out, size)
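
Note: NN_MAP_ARRAY is a textual map over a plain array, so sigmoid_array(in, out, n) expands to a loop applying sigmoid element-wise. A self-contained sketch of the expansion, assuming R is double (the actual typedef lives elsewhere in the library):

    #include <math.h>

    typedef double R;  // Assumption; the library defines R elsewhere.

    static inline R sigmoid(R x) { return 1. / (1. + exp(-x)); }

    static void sigmoid_array_expanded(const R* in, R* out, int size) {
      // What NN_MAP_ARRAY(sigmoid, in, out, size) expands to:
      for (int i = 0; i < size; ++i) {
        out[i] = sigmoid(in[i]);
      }
    }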
diff --git a/src/lib/src/matrix.c b/src/lib/src/matrix.c
index f937c01..174504f 100644
--- a/src/lib/src/matrix.c
+++ b/src/lib/src/matrix.c
@@ -8,10 +8,10 @@ nnMatrix nnMatrixMake(int rows, int cols) {
   R* values = calloc(rows * cols, sizeof(R));
   assert(values != 0);
 
-  return (nnMatrix) {
+  return (nnMatrix){
       .rows = rows,
       .cols = cols,
       .values = values,
   };
 }
 
@@ -21,8 +21,8 @@ void nnMatrixDel(nnMatrix* matrix) {
   if (matrix->values != 0) {
     free(matrix->values);
     matrix->values = 0;
     matrix->rows = 0;
     matrix->cols = 0;
   }
 }
 
@@ -30,12 +30,12 @@ void nnMatrixMove(nnMatrix* in, nnMatrix* out) {
   assert(in);
   assert(out);
 
   out->rows = in->rows;
   out->cols = in->cols;
   out->values = in->values;
 
   in->rows = 0;
   in->cols = 0;
   in->values = 0;
 }
 
@@ -45,8 +45,8 @@ void nnMatrixCopy(const nnMatrix* in, nnMatrix* out) {
   assert(in->rows == out->rows);
   assert(in->cols == out->cols);
 
   const R* in_value = in->values;
   R* out_value = out->values;
 
   for (int i = 0; i < in->rows * in->cols; ++i) {
     *out_value++ = *in_value++;
@@ -73,7 +73,8 @@ void nnMatrixRowToArray(const nnMatrix* in, int row, R* out) {
   }
 }
 
-void nnMatrixCopyCol(const nnMatrix* in, nnMatrix* out, int col_in, int col_out) {
+void nnMatrixCopyCol(
+    const nnMatrix* in, nnMatrix* out, int col_in, int col_out) {
   assert(in);
   assert(out);
   assert(in->rows == out->rows);
@@ -89,8 +90,8 @@ nnMatrix nnMatrixBorrow(nnMatrix* in) {
   assert(in);
 
   nnMatrix out;
   out.rows = in->rows;
   out.cols = in->cols;
   out.values = in->values;
   return out;
 }
@@ -101,8 +102,8 @@ nnMatrix nnMatrixBorrowRows(nnMatrix* in, int row_start, int num_rows) {
   assert(row_start + num_rows <= in->rows);
 
   nnMatrix out;
   out.rows = num_rows;
   out.cols = in->cols;
   out.values = nnMatrixRow_mut(in, row_start);
   return out;
 }
@@ -139,9 +140,9 @@ void nnMatrixMul(const nnMatrix* left, const nnMatrix* right, nnMatrix* out) {
     const R* p_left_value = &left->values[i * left->cols];
 
     for (int j = 0; j < left->cols; ++j) {
       const R left_value = *p_left_value;
       const R* right_value = &right->values[j * right->cols];
       R* out_value = &out->values[i * out->cols];
 
       for (int k = 0; k < right->cols; ++k) {
         *out_value++ += left_value * *right_value++;
@@ -152,7 +153,8 @@ void nnMatrixMul(const nnMatrix* left, const nnMatrix* right, nnMatrix* out) {
   }
 }
 
-void nnMatrixMulRows(const nnMatrix* left, const nnMatrix* right, nnMatrix* out) {
+void nnMatrixMulRows(
+    const nnMatrix* left, const nnMatrix* right, nnMatrix* out) {
   assert(left != 0);
   assert(right != 0);
   assert(out != 0);
@@ -165,7 +167,7 @@ void nnMatrixMulRows(const nnMatrix* left, const nnMatrix* right, nnMatrix* out)
   R* out_value = out->values;
 
   for (int i = 0; i < left->rows; ++i) {
     const R* left_row = &left->values[i * left->cols];
     const R* right_value = right->values;
 
     for (int j = 0; j < right->rows; ++j) {
@@ -181,7 +183,8 @@ void nnMatrixMulRows(const nnMatrix* left, const nnMatrix* right, nnMatrix* out)
   }
 }
 
-void nnMatrixMulAdd(const nnMatrix* left, const nnMatrix* right, R scale, nnMatrix* out) {
+void nnMatrixMulAdd(
+    const nnMatrix* left, const nnMatrix* right, R scale, nnMatrix* out) {
   assert(left);
   assert(right);
   assert(out);
@@ -190,16 +193,17 @@ void nnMatrixMulAdd(const nnMatrix* left, const nnMatrix* right, R scale, nnMatr
   assert(left->rows == out->rows);
   assert(left->cols == out->cols);
 
   const R* left_value = left->values;
   const R* right_value = right->values;
   R* out_value = out->values;
 
   for (int i = 0; i < left->rows * left->cols; ++i) {
     *out_value++ = *left_value++ + *right_value++ * scale;
   }
 }
 
-void nnMatrixMulSub(const nnMatrix* left, const nnMatrix* right, R scale, nnMatrix* out) {
+void nnMatrixMulSub(
+    const nnMatrix* left, const nnMatrix* right, R scale, nnMatrix* out) {
   assert(left);
   assert(right);
   assert(out);
@@ -208,16 +212,17 @@ void nnMatrixMulSub(const nnMatrix* left, const nnMatrix* right, R scale, nnMatr
   assert(left->rows == out->rows);
   assert(left->cols == out->cols);
 
   const R* left_value = left->values;
   const R* right_value = right->values;
   R* out_value = out->values;
 
   for (int i = 0; i < left->rows * left->cols; ++i) {
     *out_value++ = *left_value++ - *right_value++ * scale;
   }
 }
 
-void nnMatrixMulPairs(const nnMatrix* left, const nnMatrix* right, nnMatrix* out) {
+void nnMatrixMulPairs(
+    const nnMatrix* left, const nnMatrix* right, nnMatrix* out) {
   assert(left != 0);
   assert(right != 0);
   assert(out != 0);
@@ -226,9 +231,9 @@ void nnMatrixMulPairs(const nnMatrix* left, const nnMatrix* right, nnMatrix* out
   assert(left->rows == out->rows);
   assert(left->cols == out->cols);
 
   R* left_value = left->values;
   R* right_value = right->values;
   R* out_value = out->values;
 
   for (int i = 0; i < left->rows * left->cols; ++i) {
     *out_value++ = *left_value++ * *right_value++;
@@ -244,9 +249,9 @@ void nnMatrixAdd(const nnMatrix* left, const nnMatrix* right, nnMatrix* out) {
   assert(left->rows == out->rows);
   assert(left->cols == out->cols);
 
   const R* left_value = left->values;
   const R* right_value = right->values;
   R* out_value = out->values;
 
   for (int i = 0; i < left->rows * left->cols; ++i) {
     *out_value++ = *left_value++ + *right_value++;
@@ -262,16 +267,17 @@ void nnMatrixSub(const nnMatrix* left, const nnMatrix* right, nnMatrix* out) {
   assert(left->rows == out->rows);
   assert(left->cols == out->cols);
 
   const R* left_value = left->values;
   const R* right_value = right->values;
   R* out_value = out->values;
 
   for (int i = 0; i < left->rows * left->cols; ++i) {
     *out_value++ = *left_value++ - *right_value++;
   }
 }
 
-void nnMatrixAddRow(const nnMatrix* matrix, const nnMatrix* row, nnMatrix* out) {
+void nnMatrixAddRow(
+    const nnMatrix* matrix, const nnMatrix* row, nnMatrix* out) {
   assert(matrix);
   assert(row);
   assert(out);
@@ -281,7 +287,7 @@ void nnMatrixAddRow(const nnMatrix* matrix, const nnMatrix* row, nnMatrix* out)
   assert(matrix->cols == out->cols);
 
   const R* matrix_value = matrix->values;
   R* out_value = out->values;
 
   for (int i = 0; i < matrix->rows; ++i) {
     const R* row_value = row->values;
@@ -320,8 +326,8 @@ void nnMatrixGt(const nnMatrix* in, R threshold, nnMatrix* out) {
   assert(in->rows == out->rows);
   assert(in->cols == out->cols);
 
   const R* in_value = in->values;
   R* out_value = out->values;
 
   for (int i = 0; i < in->rows * in->cols; ++i) {
     *out_value++ = (*in_value++) > threshold ? 1 : 0;
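
Note on ownership, implied by the functions above: nnMatrixMake allocates and owns its buffer and must be paired with nnMatrixDel, while nnMatrixBorrow and nnMatrixBorrowRows return non-owning views that alias the source's values pointer. A sketch (not repository code):

    #include <neuralnet/matrix.h>

    static void views_sketch(void) {
      nnMatrix m = nnMatrixMake(4, 3);  // Owning; zero-initialized by calloc.

      // Non-owning view of row 1; writes go through to m.
      nnMatrix row = nnMatrixBorrowRows(&m, /*row_start=*/1, /*num_rows=*/1);
      row.values[0] = 1;

      // Delete only the owner; the view must not be deleted.
      nnMatrixDel(&m);
    }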
diff --git a/src/lib/src/neuralnet.c b/src/lib/src/neuralnet.c
index cac611a..a5fc59b 100644
--- a/src/lib/src/neuralnet.c
+++ b/src/lib/src/neuralnet.c
@@ -1,13 +1,14 @@
 #include <neuralnet/neuralnet.h>
 
-#include <neuralnet/matrix.h>
 #include "activation.h"
 #include "neuralnet_impl.h"
+#include <neuralnet/matrix.h>
 
 #include <assert.h>
 #include <stdlib.h>
 
-nnNeuralNetwork* nnMakeNet(int num_layers, const int* layer_sizes, const nnActivation* activations) {
+nnNeuralNetwork* nnMakeNet(
+    int num_layers, const int* layer_sizes, const nnActivation* activations) {
   assert(num_layers > 0);
   assert(layer_sizes);
   assert(activations);
@@ -19,10 +20,10 @@ nnNeuralNetwork* nnMakeNet(int num_layers, const int* layer_sizes, const nnActiv
 
   net->num_layers = num_layers;
 
   net->weights = calloc(num_layers, sizeof(nnMatrix));
   net->biases = calloc(num_layers, sizeof(nnMatrix));
   net->activations = calloc(num_layers, sizeof(nnActivation));
-  if ( (net->weights == 0) || (net->biases == 0) || (net->activations == 0) ) {
+  if ((net->weights == 0) || (net->biases == 0) || (net->activations == 0)) {
     nnDeleteNet(&net);
     return 0;
   }
@@ -30,15 +31,15 @@ nnNeuralNetwork* nnMakeNet(int num_layers, const int* layer_sizes, const nnActiv
   for (int l = 0; l < num_layers; ++l) {
     // layer_sizes = { input layer size, first hidden layer size, ...}
     const int layer_input_size = layer_sizes[l];
-    const int layer_output_size = layer_sizes[l+1];
+    const int layer_output_size = layer_sizes[l + 1];
 
     // We store the transpose of the weight matrix as written in textbooks.
     // Our vectors are row vectors and the matrices row-major.
     const int rows = layer_input_size;
     const int cols = layer_output_size;
 
     net->weights[l] = nnMatrixMake(rows, cols);
     net->biases[l] = nnMatrixMake(1, cols);
     net->activations[l] = activations[l];
   }
 
@@ -46,7 +47,7 @@ nnNeuralNetwork* nnMakeNet(int num_layers, const int* layer_sizes, const nnActiv
 }
 
 void nnDeleteNet(nnNeuralNetwork** net) {
-  if ( (!net) || (!(*net)) ) {
+  if ((!net) || (!(*net))) {
     return;
   }
   if ((*net)->weights != 0) {
@@ -77,7 +78,7 @@ void nnSetWeights(nnNeuralNetwork* net, const R* weights) {
 
   for (int l = 0; l < net->num_layers; ++l) {
     nnMatrix* layer_weights = &net->weights[l];
     R* layer_values = layer_weights->values;
 
     for (int j = 0; j < layer_weights->rows * layer_weights->cols; ++j) {
       *layer_values++ = *weights++;
@@ -91,7 +92,7 @@ void nnSetBiases(nnNeuralNetwork* net, const R* biases) {
 
   for (int l = 0; l < net->num_layers; ++l) {
     nnMatrix* layer_biases = &net->biases[l];
     R* layer_values = layer_biases->values;
 
     for (int j = 0; j < layer_biases->rows * layer_biases->cols; ++j) {
       *layer_values++ = *biases++;
@@ -99,7 +100,8 @@ void nnSetBiases(nnNeuralNetwork* net, const R* biases) {
   }
 }
 
-void nnQuery(const nnNeuralNetwork* net, nnQueryObject* query, const nnMatrix* input) {
+void nnQuery(
+    const nnNeuralNetwork* net, nnQueryObject* query, const nnMatrix* input) {
   assert(net);
   assert(query);
   assert(input);
@@ -123,29 +125,34 @@ void nnQuery(const nnNeuralNetwork* net, nnQueryObject* query, const nnMatrix* i
       // We could also rewrite the original Mul function to go row x row,
       // decomposing the multiplication. Preserving the original meaning of Mul
       // makes everything clearer.
-      nnMatrix output_vector = nnMatrixBorrowRows(&query->layer_outputs[l], i, 1);
+      nnMatrix output_vector =
+          nnMatrixBorrowRows(&query->layer_outputs[l], i, 1);
       nnMatrixMul(&input_vector, layer_weights, &output_vector);
       nnMatrixAddRow(&output_vector, layer_biases, &output_vector);
 
       switch (net->activations[l]) {
       case nnIdentity:
         break; // Nothing to do for the identity function.
       case nnSigmoid:
-        sigmoid_array(output_vector.values, output_vector.values, output_vector.cols);
+        sigmoid_array(
+            output_vector.values, output_vector.values, output_vector.cols);
         break;
       case nnRelu:
-        relu_array(output_vector.values, output_vector.values, output_vector.cols);
+        relu_array(
+            output_vector.values, output_vector.values, output_vector.cols);
         break;
       default:
         assert(0);
       }
 
       input_vector = output_vector; // Borrow.
     }
   }
 }
 
-void nnQueryArray(const nnNeuralNetwork* net, nnQueryObject* query, const R* input, R* output) {
+void nnQueryArray(
+    const nnNeuralNetwork* net, nnQueryObject* query, const R* input,
+    R* output) {
   assert(net);
   assert(query);
   assert(input);
@@ -177,9 +184,9 @@ nnQueryObject* nnMakeQueryObject(const nnNeuralNetwork* net, int num_inputs) {
     return 0;
   }
   for (int l = 0; l < net->num_layers; ++l) {
     const nnMatrix* layer_weights = &net->weights[l];
     const int layer_output_size = nnLayerOutputSize(layer_weights);
     query->layer_outputs[l] = nnMatrixMake(num_inputs, layer_output_size);
   }
   query->network_outputs = &query->layer_outputs[net->num_layers - 1];
 
@@ -187,7 +194,7 @@ nnQueryObject* nnMakeQueryObject(const nnNeuralNetwork* net, int num_inputs) {
 }
 
 void nnDeleteQueryObject(nnQueryObject** query) {
-  if ( (!query) || (!(*query)) ) {
+  if ((!query) || (!(*query))) {
     return;
   }
   if ((*query)->layer_outputs != 0) {
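
Note: nnMakeNet reads layer_sizes[l] and layer_sizes[l + 1] for each layer, so layer_sizes carries num_layers + 1 entries, input width first. A hypothetical 2-16-1 network, for illustration only; the returned network is released with nnDeleteNet(&net):

    #include <neuralnet/neuralnet.h>

    static nnNeuralNetwork* make_small_net(void) {
      const int layer_sizes[] = {2, 16, 1};  // Input, hidden, output widths.
      const nnActivation activations[] = {nnRelu, nnSigmoid};  // One per non-input layer.
      return nnMakeNet(/*num_layers=*/2, layer_sizes, activations);
    }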
diff --git a/src/lib/src/neuralnet_impl.h b/src/lib/src/neuralnet_impl.h
index 26107b5..18694f4 100644
--- a/src/lib/src/neuralnet_impl.h
+++ b/src/lib/src/neuralnet_impl.h
@@ -14,10 +14,10 @@
 ///
 /// w11 w12 w21 w22
 typedef struct nnNeuralNetwork {
   int num_layers;            // Number of non-input layers (hidden + output).
   nnMatrix* weights;         // One matrix per non-input layer.
   nnMatrix* biases;          // One vector per non-input layer.
   nnActivation* activations; // One per non-input layer.
 } nnNeuralNetwork;
 
 /// A query object that holds all the memory necessary to query a network.
@@ -31,6 +31,6 @@ typedef struct nnNeuralNetwork {
 /// convenience.
 typedef struct nnQueryObject {
   int num_layers;
   nnMatrix* layer_outputs;   // Output matrices, one output per layer.
   nnMatrix* network_outputs; // Points to the last output matrix.
 } nnTrainingQueryObject;
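
Note: a query object is sized for a fixed number of input rows and reused across calls. A sketch of one forward pass using only functions that appear in this commit; the array widths assume a 2-input, 1-output network:

    #include <neuralnet/neuralnet.h>

    static R query_once_sketch(const nnNeuralNetwork* net) {
      nnQueryObject* query = nnMakeQueryObject(net, /*num_inputs=*/1);
      const R input[2] = {0.5, -0.25};
      R output[1];
      nnQueryArray(net, query, input, output);
      nnDeleteQueryObject(&query);
      return output[0];
    }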
diff --git a/src/lib/src/train.c b/src/lib/src/train.c
index 3061a99..9244907 100644
--- a/src/lib/src/train.c
+++ b/src/lib/src/train.c
@@ -1,7 +1,7 @@
 #include <neuralnet/train.h>
 
-#include <neuralnet/matrix.h>
 #include "neuralnet_impl.h"
+#include <neuralnet/matrix.h>
 
 #include <random/mt19937-64.h>
 #include <random/normal.h>
@@ -14,13 +14,13 @@
 #define LOGD printf
 
 // If debug mode is requested, we will show progress every this many iterations.
 static const int PROGRESS_THRESHOLD = 5; // %
 
 /// Computes the total MSE from the output error matrix.
 R ComputeMSE(const nnMatrix* errors) {
   R sum_sq = 0;
   const int N = errors->rows * errors->cols;
   const R* value = errors->values;
   for (int i = 0; i < N; ++i) {
     sum_sq += *value * *value;
     value++;
@@ -30,7 +30,7 @@ R ComputeMSE(const nnMatrix* errors) {
 
 /// Holds the bits required to compute a sigmoid gradient.
 typedef struct nnSigmoidGradientElements {
   nnMatrix ones; // A vector of just ones, same size as the layer.
 } nnSigmoidGradientElements;
 
 /// Holds the various elements required to compute gradients. These depend on
@@ -49,7 +49,8 @@ typedef struct nnGradientElements {
 } nnGradientElements;
 
 // Initialize the network's weights randomly and set their biases to 0.
-void nnInitNet(nnNeuralNetwork* net, uint64_t seed, const nnWeightInitStrategy strategy) {
+void nnInitNet(
+    nnNeuralNetwork* net, uint64_t seed, const nnWeightInitStrategy strategy) {
   assert(net);
 
   mt19937_64 rng = mt19937_64_make();
@@ -60,41 +61,42 @@ void nnInitNet(nnNeuralNetwork* net, uint64_t seed, const nnWeightInitStrategy s
     nnMatrix* biases = &net->biases[l];
 
     const R layer_size = (R)nnLayerInputSize(weights);
     const R scale = 1. / layer_size;
     const R stdev = 1. / sqrt((R)layer_size);
     const R sigma = stdev * stdev;
 
     R* value = weights->values;
     for (int k = 0; k < weights->rows * weights->cols; ++k) {
       switch (strategy) {
       case nnWeightInit01: {
         const R x01 = mt19937_64_gen_real3(&rng); // (0, +1) interval.
         *value++ = scale * x01;
         break;
       }
       case nnWeightInit11: {
         const R x11 = mt19937_64_gen_real4(&rng); // (-1, +1) interval.
         *value++ = scale * x11;
         break;
       }
-      case nnWeightInitNormal:
+      case nnWeightInitNormal: {
         // Using initialization with a normal distribution of standard
         // deviation 1 / sqrt(num_layer_weights) to prevent saturation when
         // multiplying inputs.
         const R u01 = mt19937_64_gen_real3(&rng); // (0, +1) interval.
         const R v01 = mt19937_64_gen_real3(&rng); // (0, +1) interval.
         R z0, z1;
         normal2(u01, v01, &z0, &z1);
         z0 = normal_transform(z0, /*mu=*/0, sigma);
         z1 = normal_transform(z1, /*mu=*/0, sigma);
         *value++ = z0;
         if (k < weights->rows * weights->cols - 1) {
           *value++ = z1;
           ++k;
         }
         break;
+      }
       default:
         assert(false);
       }
     }
 
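
Note: normal2 and normal_transform come from random/normal.h, which this diff does not show. A plain-C sketch of the Box-Muller construction the loop appears to rely on, under the assumption (not verified against that header) that normal2 maps two uniform (0,1) samples to two independent standard normals and normal_transform applies z * sigma + mu:

    #include <math.h>

    typedef double R;  // Assumption; the library defines R elsewhere.

    static void normal2_sketch(R u01, R v01, R* z0, R* z1) {
      // Box-Muller: u01 and v01 lie in (0,1), so the log is finite.
      const R r = sqrt(-2. * log(u01));
      *z0 = r * cos(2. * 3.14159265358979323846 * v01);
      *z1 = r * sin(2. * 3.14159265358979323846 * v01);
    }

    static R normal_transform_sketch(R z, R mu, R sigma) {
      return z * sigma + mu;
    }

Each pass through that case consumes both normal samples, advancing k twice, so no generated randomness is discarded.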
@@ -112,9 +114,7 @@ void nnInitNet(nnNeuralNetwork* net, uint64_t seed, const nnWeightInitStrategy s
 //
 // For now, each iteration trains with one sample (row) at a time.
 void nnTrain(
-    nnNeuralNetwork* net,
-    const nnMatrix* inputs,
-    const nnMatrix* targets,
+    nnNeuralNetwork* net, const nnMatrix* inputs, const nnMatrix* targets,
     const nnTrainingParams* params) {
   assert(net);
   assert(inputs);
@@ -129,34 +129,35 @@ void nnTrain(
   nnMatrix* errors = calloc(net->num_layers, sizeof(nnMatrix));
 
   // Allocate the weight transpose matrices up front for backpropagation.
-  //nnMatrix* weights_T = calloc(net->num_layers, sizeof(nnMatrix));
+  // nnMatrix* weights_T = calloc(net->num_layers, sizeof(nnMatrix));
 
   // Allocate the weight delta matrices.
   nnMatrix* weight_deltas = calloc(net->num_layers, sizeof(nnMatrix));
 
   // Allocate the data structures required to compute gradients.
   // This depends on each layer's activation type.
-  nnGradientElements* gradient_elems = calloc(net->num_layers, sizeof(nnGradientElements));
+  nnGradientElements* gradient_elems =
+      calloc(net->num_layers, sizeof(nnGradientElements));
 
   // Allocate the output transpose vectors for weight delta calculation.
   // This is one column vector per layer.
   nnMatrix* outputs_T = calloc(net->num_layers, sizeof(nnMatrix));
 
   assert(errors != 0);
-  //assert(weights_T != 0);
+  // assert(weights_T != 0);
   assert(weight_deltas != 0);
   assert(gradient_elems);
   assert(outputs_T);
 
   for (int l = 0; l < net->num_layers; ++l) {
     const nnMatrix* layer_weights = &net->weights[l];
     const int layer_output_size = net->weights[l].cols;
     const nnActivation activation = net->activations[l];
 
     errors[l] = nnMatrixMake(1, layer_weights->cols);
 
-    //weights_T[l] = nnMatrixMake(layer_weights->cols, layer_weights->rows);
-    //nnMatrixTranspose(layer_weights, &weights_T[l]);
+    // weights_T[l] = nnMatrixMake(layer_weights->cols, layer_weights->rows);
+    // nnMatrixTranspose(layer_weights, &weights_T[l]);
 
     weight_deltas[l] = nnMatrixMake(layer_weights->rows, layer_weights->cols);
 
@@ -164,21 +165,21 @@ void nnTrain(
 
     // Allocate the gradient elements and vectors for weight delta calculation.
     nnGradientElements* elems = &gradient_elems[l];
     elems->type = activation;
     switch (activation) {
     case nnIdentity:
       break; // Gradient vector will be borrowed, no need to allocate.
 
     case nnSigmoid:
       elems->gradient = nnMatrixMake(1, layer_output_size);
       // Allocate the 1s vectors.
       elems->sigmoid.ones = nnMatrixMake(1, layer_output_size);
       nnMatrixInitConstant(&elems->sigmoid.ones, 1);
       break;
 
     case nnRelu:
       elems->gradient = nnMatrixMake(1, layer_output_size);
       break;
     }
   }
 
@@ -195,9 +196,9 @@ void nnTrain(
 
   // If debug mode is requested, we will show progress every Nth iteration.
   const int progress_frame =
       (params->max_iterations < PROGRESS_THRESHOLD)
           ? 1
           : (params->max_iterations * PROGRESS_THRESHOLD / 100);
 
   // --- TRAIN
 
@@ -209,8 +210,10 @@ void nnTrain(
     for (int sample = 0; sample < inputs->rows; ++sample) {
       // Slice the input and target matrices with the batch size.
       // We are not mutating the inputs, but we need the cast to borrow.
-      nnMatrix training_inputs = nnMatrixBorrowRows((nnMatrix*)inputs, sample, 1);
-      nnMatrix training_targets = nnMatrixBorrowRows((nnMatrix*)targets, sample, 1);
+      nnMatrix training_inputs =
+          nnMatrixBorrowRows((nnMatrix*)inputs, sample, 1);
+      nnMatrix training_targets =
+          nnMatrixBorrowRows((nnMatrix*)targets, sample, 1);
 
       // Will need the input transposed for backpropagation.
       // Assuming one training input per iteration for now.
@@ -221,8 +224,10 @@ void nnTrain(
       // part of the derivative, -2(t-o). Also, we compute o-t instead to
       // remove that outer negative sign.
       nnQuery(net, query, &training_inputs);
-      //nnMatrixSub(&training_targets, training_outputs, &errors[net->num_layers - 1]);
-      nnMatrixSub(training_outputs, &training_targets, &errors[net->num_layers - 1]);
+      // nnMatrixSub(&training_targets, training_outputs,
+      //     &errors[net->num_layers - 1]);
+      nnMatrixSub(
+          training_outputs, &training_targets, &errors[net->num_layers - 1]);
 
       // Update outputs_T, which we need during weight updates.
       for (int l = 0; l < net->num_layers; ++l) {
@@ -232,12 +237,12 @@ void nnTrain(
       // Update weights and biases for each internal layer, backpropagating
       // errors along the way.
       for (int l = net->num_layers - 1; l >= 0; --l) {
         const nnMatrix* layer_output = &query->layer_outputs[l];
         nnMatrix* layer_weights = &net->weights[l];
         nnMatrix* layer_biases = &net->biases[l];
         nnGradientElements* elems = &gradient_elems[l];
         nnMatrix* gradient = &elems->gradient;
         const nnActivation activation = net->activations[l];
 
         // Compute the gradient (the part of the expression that does not
         // contain the output of the previous layer).
@@ -246,55 +251,58 @@ void nnTrain(
         // Sigmoid: G = error_k * output_k * (1 - output_k).
         // Relu: G = error_k * (output_k > 0 ? 1 : 0)
         switch (activation) {
         case nnIdentity:
           // TODO: Just copy the pointer?
           *gradient = nnMatrixBorrow(&errors[l]);
           break;
         case nnSigmoid:
           nnMatrixSub(&elems->sigmoid.ones, layer_output, gradient);
           nnMatrixMulPairs(layer_output, gradient, gradient);
           nnMatrixMulPairs(&errors[l], gradient, gradient);
           break;
         case nnRelu:
           nnMatrixGt(layer_output, 0, gradient);
           nnMatrixMulPairs(&errors[l], gradient, gradient);
           break;
         }
 
         // Outer product to compute the weight deltas.
-        const nnMatrix* output_T = (l == 0) ? &training_inputs_T : &outputs_T[l-1];
+        const nnMatrix* output_T =
+            (l == 0) ? &training_inputs_T : &outputs_T[l - 1];
         nnMatrixMul(output_T, gradient, &weight_deltas[l]);
 
         // Backpropagate the error before updating weights.
         if (l > 0) {
           // G * W^T == G *^T W.
-          //nnMatrixMul(gradient, &weights_T[l], &errors[l-1]);
-          nnMatrixMulRows(gradient, layer_weights, &errors[l-1]);
+          // nnMatrixMul(gradient, &weights_T[l], &errors[l-1]);
+          nnMatrixMulRows(gradient, layer_weights, &errors[l - 1]);
         }
 
         // Update weights.
         nnMatrixScale(&weight_deltas[l], params->learning_rate);
         // The gradient has a negative sign from -(t - o), but we have computed
         // e = o - t instead, so we can subtract directly.
-        //nnMatrixAdd(layer_weights, &weight_deltas[l], layer_weights);
+        // nnMatrixAdd(layer_weights, &weight_deltas[l], layer_weights);
         nnMatrixSub(layer_weights, &weight_deltas[l], layer_weights);
 
         // Update weight transpose matrix for the next training iteration.
-        //nnMatrixTranspose(layer_weights, &weights_T[l]);
+        // nnMatrixTranspose(layer_weights, &weights_T[l]);
 
         // Update biases.
         // This is the same formula as for weights, except that the o_j term is
         // just 1. We can simply re-use the gradient that we have already
         // computed for the weight update.
-        //nnMatrixMulAdd(layer_biases, gradient, params->learning_rate, layer_biases);
-        nnMatrixMulSub(layer_biases, gradient, params->learning_rate, layer_biases);
+        // nnMatrixMulAdd(layer_biases, gradient, params->learning_rate,
+        // layer_biases);
+        nnMatrixMulSub(
+            layer_biases, gradient, params->learning_rate, layer_biases);
       }
 
       // TODO: Add this under a verbose debugging mode.
       // if (params->debug) {
-      //   LOGD("Iter: %d, Sample: %d, Error: %f\n", iter, sample, ComputeMSE(&errors[net->num_layers - 1]));
-      //   LOGD("TGT: ");
-      //   for (int i = 0; i < training_targets.cols; ++i) {
+      //   LOGD("Iter: %d, Sample: %d, Error: %f\n", iter, sample,
+      //   ComputeMSE(&errors[net->num_layers - 1])); LOGD("TGT: "); for (int i
+      //   = 0; i < training_targets.cols; ++i) {
       //     printf("%.3f ", training_targets.values[i]);
       //   }
       //   printf("\n");
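
Note: the three matrix calls in the nnSigmoid branch compute G = e * o * (1 - o) element-wise, reusing the gradient buffer for each intermediate. A scalar sketch of the same arithmetic for a single neuron (not repository code):

    typedef double R;  // Assumption; the library defines R elsewhere.

    static R sigmoid_gradient_sketch(R error, R output) {
      R g = 1. - output;  // nnMatrixSub(&elems->sigmoid.ones, layer_output, gradient)
      g = output * g;     // nnMatrixMulPairs(layer_output, gradient, gradient)
      g = error * g;      // nnMatrixMulPairs(&errors[l], gradient, gradient)
      return g;
    }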
@@ -307,42 +315,44 @@ void nnTrain(
     }
 
     if (params->debug && ((iter % progress_frame) == 0)) {
-      LOGD("Iter: %d/%d, Error: %f\n",
-          iter, params->max_iterations, ComputeMSE(&errors[net->num_layers - 1]));
+      LOGD(
+          "Iter: %d/%d, Error: %f\n", iter, params->max_iterations,
+          ComputeMSE(&errors[net->num_layers - 1]));
     }
   }
 
   // Print the final error.
   if (params->debug) {
-    LOGD("Iter: %d/%d, Error: %f\n",
-        params->max_iterations, params->max_iterations, ComputeMSE(&errors[net->num_layers - 1]));
+    LOGD(
+        "Iter: %d/%d, Error: %f\n", params->max_iterations,
+        params->max_iterations, ComputeMSE(&errors[net->num_layers - 1]));
   }
 
   for (int l = 0; l < net->num_layers; ++l) {
     nnMatrixDel(&errors[l]);
     nnMatrixDel(&outputs_T[l]);
-    //nnMatrixDel(&weights_T[l]);
+    // nnMatrixDel(&weights_T[l]);
     nnMatrixDel(&weight_deltas[l]);
 
     nnGradientElements* elems = &gradient_elems[l];
     switch (elems->type) {
     case nnIdentity:
       break; // Gradient vector is borrowed, no need to deallocate.
 
     case nnSigmoid:
       nnMatrixDel(&elems->gradient);
       nnMatrixDel(&elems->sigmoid.ones);
       break;
 
     case nnRelu:
       nnMatrixDel(&elems->gradient);
       break;
     }
   }
   nnMatrixDel(&training_inputs_T);
   free(errors);
   free(outputs_T);
-  //free(weights_T);
+  // free(weights_T);
   free(weight_deltas);
   free(gradient_elems);
 }