Diffstat (limited to 'src/lib/src/neuralnet.c')
-rw-r--r--  src/lib/src/neuralnet.c  228
1 files changed, 228 insertions, 0 deletions
diff --git a/src/lib/src/neuralnet.c b/src/lib/src/neuralnet.c
new file mode 100644
index 0000000..cac611a
--- /dev/null
+++ b/src/lib/src/neuralnet.c
@@ -0,0 +1,228 @@
#include <neuralnet/neuralnet.h>

#include <neuralnet/matrix.h>
#include "activation.h"
#include "neuralnet_impl.h"

#include <assert.h>
#include <stdlib.h>

nnNeuralNetwork* nnMakeNet(int num_layers, const int* layer_sizes, const nnActivation* activations) {
  assert(num_layers > 0);
  assert(layer_sizes);
  assert(activations);

  nnNeuralNetwork* net = calloc(1, sizeof(nnNeuralNetwork));
  if (net == 0) {
    return 0;
  }

  net->num_layers = num_layers;

  net->weights = calloc(num_layers, sizeof(nnMatrix));
  net->biases = calloc(num_layers, sizeof(nnMatrix));
  net->activations = calloc(num_layers, sizeof(nnActivation));
  if ( (net->weights == 0) || (net->biases == 0) || (net->activations == 0) ) {
    nnDeleteNet(&net);
    return 0;
  }

  for (int l = 0; l < num_layers; ++l) {
    // layer_sizes = { input layer size, first hidden layer size, ... }
    const int layer_input_size = layer_sizes[l];
    const int layer_output_size = layer_sizes[l+1];

    // We store the transpose of the weight matrix as written in textbooks.
    // Our vectors are row vectors and the matrices row-major.
    const int rows = layer_input_size;
    const int cols = layer_output_size;

    net->weights[l] = nnMatrixMake(rows, cols);
    net->biases[l] = nnMatrixMake(1, cols);
    net->activations[l] = activations[l];
  }

  return net;
}
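
// Illustrative sketch (not part of the original file): constructing a small
// 2-4-1 network. Note that layer_sizes needs num_layers + 1 entries because
// it also carries the input size, while activations has one entry per layer.
// The topology and activation choices below are hypothetical.
static nnNeuralNetwork* nnExampleMakeNet(void) {
  const int          layer_sizes[] = {2, 4, 1};  // input, hidden, output
  const nnActivation activations[] = {nnRelu, nnSigmoid};
  return nnMakeNet(/*num_layers=*/2, layer_sizes, activations);
}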

void nnDeleteNet(nnNeuralNetwork** net) {
  if ( (!net) || (!(*net)) ) {
    return;
  }
  if ((*net)->weights != 0) {
    for (int l = 0; l < (*net)->num_layers; ++l) {
      nnMatrixDel(&(*net)->weights[l]);
    }
    free((*net)->weights);
    (*net)->weights = 0;
  }
  if ((*net)->biases != 0) {
    for (int l = 0; l < (*net)->num_layers; ++l) {
      nnMatrixDel(&(*net)->biases[l]);
    }
    free((*net)->biases);
    (*net)->biases = 0;
  }
  if ((*net)->activations) {
    free((*net)->activations);
    (*net)->activations = 0;
  }
  free(*net);
  *net = 0;
}

void nnSetWeights(nnNeuralNetwork* net, const R* weights) {
  assert(net);
  assert(weights);

  for (int l = 0; l < net->num_layers; ++l) {
    nnMatrix* layer_weights = &net->weights[l];
    R* layer_values = layer_weights->values;

    for (int j = 0; j < layer_weights->rows * layer_weights->cols; ++j) {
      *layer_values++ = *weights++;
    }
  }
}

void nnSetBiases(nnNeuralNetwork* net, const R* biases) {
  assert(net);
  assert(biases);

  for (int l = 0; l < net->num_layers; ++l) {
    nnMatrix* layer_biases = &net->biases[l];
    R* layer_values = layer_biases->values;

    for (int j = 0; j < layer_biases->rows * layer_biases->cols; ++j) {
      *layer_values++ = *biases++;
    }
  }
}

void nnQuery(const nnNeuralNetwork* net, nnQueryObject* query, const nnMatrix* input) {
  assert(net);
  assert(query);
  assert(input);
  assert(net->num_layers == query->num_layers);
  assert(input->rows <= query->network_outputs->rows);
  assert(input->cols == nnNetInputSize(net));

  for (int i = 0; i < input->rows; ++i) {
    // Not mutating the input, but we need the cast to borrow.
    nnMatrix input_vector = nnMatrixBorrowRows((nnMatrix*)input, i, 1);

    for (int l = 0; l < net->num_layers; ++l) {
      const nnMatrix* layer_weights = &net->weights[l];
      const nnMatrix* layer_biases = &net->biases[l];
      // Y^T = (W*X)^T = X^T*W^T
      //
      // TODO: If we had a row-row matrix multiplication, we could compute:
      //   Y^T = W ** X^T
      // The row-row multiplication could be more cache-friendly. We just need
      // to store W as is, without transposing.
      // We could also rewrite the original Mul function to go row x row,
      // decomposing the multiplication. Preserving the original meaning of Mul
      // makes everything clearer.
      nnMatrix output_vector = nnMatrixBorrowRows(&query->layer_outputs[l], i, 1);
      nnMatrixMul(&input_vector, layer_weights, &output_vector);
      nnMatrixAddRow(&output_vector, layer_biases, &output_vector);

      switch (net->activations[l]) {
      case nnIdentity:
        break;  // Nothing to do for the identity function.
      case nnSigmoid:
        sigmoid_array(output_vector.values, output_vector.values, output_vector.cols);
        break;
      case nnRelu:
        relu_array(output_vector.values, output_vector.values, output_vector.cols);
        break;
      default:
        assert(0);
      }

      input_vector = output_vector;  // Borrow.
    }
  }
}
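
// Illustrative sketch (not part of the original file): what one layer step in
// nnQuery computes, written with plain arrays. With row vectors and the
// weights stored transposed (rows = layer inputs, cols = layer outputs), the
// row-major product x * W plus the bias row gives
//   y[c] = bias[c] + sum_r x[r] * W[r*cols + c],
// which is the Y^T = X^T*W^T form used above. The helper name and the
// caller-provided buffers are hypothetical.
static void nnExampleDenseLayer(const R* x, const R* weights, const R* bias,
                                R* y, int rows, int cols) {
  for (int c = 0; c < cols; ++c) {
    R sum = bias[c];
    for (int r = 0; r < rows; ++r) {
      sum += x[r] * weights[r * cols + c];
    }
    y[c] = sum;  // The layer activation (sigmoid/relu) is applied afterwards.
  }
}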

void nnQueryArray(const nnNeuralNetwork* net, nnQueryObject* query, const R* input, R* output) {
  assert(net);
  assert(query);
  assert(input);
  assert(output);
  assert(net->num_layers > 0);

  // The input is a row vector whose length is the network input size, i.e.
  // the row count of the first (transposed) weight matrix.
  nnMatrix input_vector = nnMatrixMake(1, net->weights[0].rows);
  nnMatrixInit(&input_vector, input);
  nnQuery(net, query, &input_vector);
  nnMatrixRowToArray(query->network_outputs, 0, output);
  nnMatrixDel(&input_vector);
}

nnQueryObject* nnMakeQueryObject(const nnNeuralNetwork* net, int num_inputs) {
  assert(net);
  assert(num_inputs > 0);
  assert(net->num_layers > 0);

  nnQueryObject* query = calloc(1, sizeof(nnQueryObject));
  if (!query) {
    return 0;
  }

  query->num_layers = net->num_layers;

  // Allocate the intermediate layer output matrices.
  query->layer_outputs = calloc(net->num_layers, sizeof(nnMatrix));
  if (!query->layer_outputs) {
    free(query);
    return 0;
  }
  for (int l = 0; l < net->num_layers; ++l) {
    const nnMatrix* layer_weights = &net->weights[l];
    const int layer_output_size = nnLayerOutputSize(layer_weights);
    query->layer_outputs[l] = nnMatrixMake(num_inputs, layer_output_size);
  }
  query->network_outputs = &query->layer_outputs[net->num_layers - 1];

  return query;
}

void nnDeleteQueryObject(nnQueryObject** query) {
  if ( (!query) || (!(*query)) ) {
    return;
  }
  if ((*query)->layer_outputs != 0) {
    for (int l = 0; l < (*query)->num_layers; ++l) {
      nnMatrixDel(&(*query)->layer_outputs[l]);
    }
  }
  free((*query)->layer_outputs);
  free(*query);
  *query = 0;
}

const nnMatrix* nnNetOutputs(const nnQueryObject* query) {
  assert(query);
  return query->network_outputs;
}

int nnNetInputSize(const nnNeuralNetwork* net) {
  assert(net);
  assert(net->num_layers > 0);
  return net->weights[0].rows;
}

int nnNetOutputSize(const nnNeuralNetwork* net) {
  assert(net);
  assert(net->num_layers > 0);
  return net->weights[net->num_layers - 1].cols;
}

int nnLayerInputSize(const nnMatrix* weights) {
  assert(weights);
  return weights->rows;
}

int nnLayerOutputSize(const nnMatrix* weights) {
  assert(weights);
  return weights->cols;
}
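
// Illustrative sketch (not part of the original file): the intended call
// sequence for a forward pass. The topology and the weight/bias values are
// hypothetical. nnSetWeights()/nnSetBiases() expect one flat array holding
// each layer's row-major matrix (rows = layer inputs, cols = layer outputs),
// laid out in layer order.
static void nnExampleForwardPass(void) {
  const int          layer_sizes[] = {2, 2, 1};
  const nnActivation activations[] = {nnSigmoid, nnIdentity};
  nnNeuralNetwork*   net           = nnMakeNet(2, layer_sizes, activations);
  if (!net) {
    return;
  }

  // Layer 0: 2x2 weights and 2 biases; layer 1: 2x1 weights and 1 bias.
  const R weights[] = {0.5, -0.5, 0.25, 0.75, /* layer 1 */ 1.0, -1.0};
  const R biases[]  = {0.0, 0.1, /* layer 1 */ 0.2};
  nnSetWeights(net, weights);
  nnSetBiases(net, biases);

  // Query a single input row vector and read back the network output.
  nnQueryObject* query = nnMakeQueryObject(net, /*num_inputs=*/1);
  if (query) {
    const R  input_values[] = {1.0, 0.0};
    nnMatrix input          = nnMatrixMake(1, 2);
    nnMatrixInit(&input, input_values);
    nnQuery(net, query, &input);

    R output[1];
    nnMatrixRowToArray(query->network_outputs, 0, output);

    nnMatrixDel(&input);
    nnDeleteQueryObject(&query);
  }
  nnDeleteNet(&net);
}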