Diffstat (limited to 'src/lib/test/neuralnet_test.c')
-rw-r--r--   src/lib/test/neuralnet_test.c   103
1 file changed, 62 insertions(+), 41 deletions(-)
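Note: this diff ports the tests from the old nnMakeNet(num_layers, layer_sizes, layer_activations) plus nnSetWeights()/nnSetBiases() interface to a layer-descriptor interface, in which each nnLayer either wraps a linear transform (its weight and bias matrices built with nnMatrixFromArray) or is a standalone activation layer (nnSigmoid, nnRelu). As a rough sketch of the new construction path, using only the types and calls that appear in the diff below (the R scalar type and the two headers come from the library itself):

    #include <neuralnet/matrix.h>
    #include <neuralnet/neuralnet.h>

    // Sketch only: mirrors the perceptron test in this diff.
    const R weights[] = {0.3};
    const R biases[] = {0.0};
    const nnLayer layers[] = {
        {.type = nnLinear,
         .linear = {.weights = nnMatrixFromArray(1, 1, weights),
                    .biases = nnMatrixFromArray(1, 1, biases)}},
        {.type = nnSigmoid},  // Activations are now layers of their own.
    };
    // The net is built from the layer list and the input size; weights and
    // biases are no longer set after construction.
    nnNeuralNetwork* net = nnMakeNet(layers, /*num_layers=*/2, /*input_size=*/1);

Querying through nnMakeQueryObject()/nnQueryArray()/nnQuery() is unchanged apart from the dropped argument comment, as the hunks below show.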
diff --git a/src/lib/test/neuralnet_test.c b/src/lib/test/neuralnet_test.c
index 14d9438..0f8d7b8 100644
--- a/src/lib/test/neuralnet_test.c
+++ b/src/lib/test/neuralnet_test.c
@@ -1,8 +1,8 @@
 #include <neuralnet/neuralnet.h>
 
-#include <neuralnet/matrix.h>
 #include "activation.h"
 #include "neuralnet_impl.h"
+#include <neuralnet/matrix.h>
 
 #include "test.h"
 #include "test_util.h"
@@ -10,23 +10,31 @@
 #include <assert.h>
 
 TEST_CASE(neuralnet_perceptron_test) {
-  const int num_layers = 1;
-  const int layer_sizes[] = { 1, 1 };
-  const nnActivation layer_activations[] = { nnSigmoid };
-  const R weights[] = { 0.3 };
+  const int num_layers = 2;
+  const int input_size = 1;
+  const R weights[] = {0.3};
+  const R biases[] = {0.0};
+  const nnLayer layers[] = {
+      {.type = nnLinear,
+       .linear =
+           {.weights = nnMatrixFromArray(1, 1, weights),
+            .biases = nnMatrixFromArray(1, 1, biases)}},
+      {.type = nnSigmoid},
+  };
 
-  nnNeuralNetwork* net = nnMakeNet(num_layers, layer_sizes, layer_activations);
+  nnNeuralNetwork* net = nnMakeNet(layers, num_layers, input_size);
   assert(net);
-  nnSetWeights(net, weights);
 
-  nnQueryObject* query = nnMakeQueryObject(net, /*num_inputs=*/1);
+  nnQueryObject* query = nnMakeQueryObject(net, 1);
 
-  const R input[] = { 0.9 };
+  const R input[] = {0.9};
   R output[1];
   nnQueryArray(net, query, input, output);
 
   const R expected_output = sigmoid(input[0] * weights[0]);
-  printf("\nOutput: %f, Expected: %f\n", output[0], expected_output);
+  printf(
+      "\n[neuralnet_perceptron_test] Output: %f, Expected: %f\n", output[0],
+      expected_output);
   TEST_TRUE(double_eq(output[0], expected_output, EPS));
 
   nnDeleteQueryObject(&query);
@@ -34,53 +42,66 @@ TEST_CASE(neuralnet_perceptron_test) {
 }
 
 TEST_CASE(neuralnet_xor_test) {
-  const int num_layers = 2;
-  const int layer_sizes[] = { 2, 2, 1 };
-  const nnActivation layer_activations[] = { nnRelu, nnIdentity };
-  const R weights[] = {
-    1, 1, 1, 1, // First (hidden) layer.
-    1, -2 // Second (output) layer.
-  };
-  const R biases[] = {
-    0, -1, // First (hidden) layer.
-    0 // Second (output) layer.
+  // First (hidden) layer.
+  const R weights0[] = {1, 1, 1, 1};
+  const R biases0[] = {0, -1};
+  // Second (output) layer.
+  const R weights1[] = {1, -2};
+  const R biases1[] = {0};
+  // Network.
+  const int num_layers = 3;
+  const int input_size = 2;
+  const nnLayer layers[] = {
+      {.type = nnLinear,
+       .linear =
+           {.weights = nnMatrixFromArray(2, 2, weights0),
+            .biases = nnMatrixFromArray(1, 2, biases0)}},
+      {.type = nnRelu},
+      {.type = nnLinear,
+       .linear =
+           {.weights = nnMatrixFromArray(2, 1, weights1),
+            .biases = nnMatrixFromArray(1, 1, biases1)}},
   };
 
-  nnNeuralNetwork* net = nnMakeNet(num_layers, layer_sizes, layer_activations);
+  nnNeuralNetwork* net = nnMakeNet(layers, num_layers, input_size);
   assert(net);
-  nnSetWeights(net, weights);
-  nnSetBiases(net, biases);
 
   // First layer weights.
-  TEST_EQUAL(nnMatrixAt(&net->weights[0], 0, 0), 1);
-  TEST_EQUAL(nnMatrixAt(&net->weights[0], 0, 1), 1);
-  TEST_EQUAL(nnMatrixAt(&net->weights[0], 0, 2), 1);
-  TEST_EQUAL(nnMatrixAt(&net->weights[0], 0, 3), 1);
-  // Second layer weights.
-  TEST_EQUAL(nnMatrixAt(&net->weights[1], 0, 0), 1);
-  TEST_EQUAL(nnMatrixAt(&net->weights[1], 0, 1), -2);
+  TEST_EQUAL(nnMatrixAt(&net->layers[0].linear.weights, 0, 0), 1);
+  TEST_EQUAL(nnMatrixAt(&net->layers[0].linear.weights, 0, 1), 1);
+  TEST_EQUAL(nnMatrixAt(&net->layers[0].linear.weights, 0, 2), 1);
+  TEST_EQUAL(nnMatrixAt(&net->layers[0].linear.weights, 0, 3), 1);
+  // Second linear layer (third layer) weights.
+  TEST_EQUAL(nnMatrixAt(&net->layers[2].linear.weights, 0, 0), 1);
+  TEST_EQUAL(nnMatrixAt(&net->layers[2].linear.weights, 0, 1), -2);
   // First layer biases.
-  TEST_EQUAL(nnMatrixAt(&net->biases[0], 0, 0), 0);
-  TEST_EQUAL(nnMatrixAt(&net->biases[0], 0, 1), -1);
-  // Second layer biases.
-  TEST_EQUAL(nnMatrixAt(&net->biases[1], 0, 0), 0);
+  TEST_EQUAL(nnMatrixAt(&net->layers[0].linear.biases, 0, 0), 0);
+  TEST_EQUAL(nnMatrixAt(&net->layers[0].linear.biases, 0, 1), -1);
+  // Second linear layer (third layer) biases.
+  TEST_EQUAL(nnMatrixAt(&net->layers[2].linear.biases, 0, 0), 0);
 
   // Test.
 
 #define M 4
 
-  nnQueryObject* query = nnMakeQueryObject(net, /*num_inputs=*/M);
+  nnQueryObject* query = nnMakeQueryObject(net, M);
 
-  const R test_inputs[M][2] = { { 0., 0. }, { 1., 0. }, { 0., 1. }, { 1., 1. } };
+  const R test_inputs[M][2] = {
+      {0., 0.},
+      {1., 0.},
+      {0., 1.},
+      {1., 1.}
+  };
   nnMatrix test_inputs_matrix = nnMatrixMake(M, 2);
   nnMatrixInit(&test_inputs_matrix, (const R*)test_inputs);
   nnQuery(net, query, &test_inputs_matrix);
 
-  const R expected_outputs[M] = { 0., 1., 1., 0. };
+  const R expected_outputs[M] = {0., 1., 1., 0.};
   for (int i = 0; i < M; ++i) {
     const R test_output = nnMatrixAt(nnNetOutputs(query), i, 0);
-    printf("\nInput: (%f, %f), Output: %f, Expected: %f\n",
-        test_inputs[i][0], test_inputs[i][1], test_output, expected_outputs[i]);
+    printf(
+        "\nInput: (%f, %f), Output: %f, Expected: %f\n", test_inputs[i][0],
+        test_inputs[i][1], test_output, expected_outputs[i]);
   }
   for (int i = 0; i < M; ++i) {
     const R test_output = nnMatrixAt(nnNetOutputs(query), i, 0);