diff options
| author | 3gg <3gg@shellblade.net> | 2023-11-23 08:38:59 -0800 |
|---|---|---|
| committer | 3gg <3gg@shellblade.net> | 2023-11-23 08:38:59 -0800 |
| commit | 6ca8a31143f087f3bc470d39eb3c00156443802a (patch) | |
| tree | 8a7462d28e75d0cfc4eff323f0b83ff12c6dc860 /src/lib/src/neuralnet.c | |
| parent | 041613467a0915e6ec07cdab0ca3e7b8d757fe5f (diff) | |
Formatting.
Diffstat (limited to 'src/lib/src/neuralnet.c')
| -rw-r--r-- | src/lib/src/neuralnet.c | 65 |
1 file changed, 36 insertions, 29 deletions
diff --git a/src/lib/src/neuralnet.c b/src/lib/src/neuralnet.c index cac611a..a5fc59b 100644 --- a/src/lib/src/neuralnet.c +++ b/src/lib/src/neuralnet.c | |||
| @@ -1,13 +1,14 @@ | |||
| 1 | #include <neuralnet/neuralnet.h> | 1 | #include <neuralnet/neuralnet.h> |
| 2 | 2 | ||
| 3 | #include <neuralnet/matrix.h> | ||
| 4 | #include "activation.h" | 3 | #include "activation.h" |
| 5 | #include "neuralnet_impl.h" | 4 | #include "neuralnet_impl.h" |
| 5 | #include <neuralnet/matrix.h> | ||
| 6 | 6 | ||
| 7 | #include <assert.h> | 7 | #include <assert.h> |
| 8 | #include <stdlib.h> | 8 | #include <stdlib.h> |
| 9 | 9 | ||
| 10 | nnNeuralNetwork* nnMakeNet(int num_layers, const int* layer_sizes, const nnActivation* activations) { | 10 | nnNeuralNetwork* nnMakeNet( |
| 11 | int num_layers, const int* layer_sizes, const nnActivation* activations) { | ||
| 11 | assert(num_layers > 0); | 12 | assert(num_layers > 0); |
| 12 | assert(layer_sizes); | 13 | assert(layer_sizes); |
| 13 | assert(activations); | 14 | assert(activations); |
| @@ -19,10 +20,10 @@ nnNeuralNetwork* nnMakeNet(int num_layers, const int* layer_sizes, const nnActiv | |||
| 19 | 20 | ||
| 20 | net->num_layers = num_layers; | 21 | net->num_layers = num_layers; |
| 21 | 22 | ||
| 22 | net->weights = calloc(num_layers, sizeof(nnMatrix)); | 23 | net->weights = calloc(num_layers, sizeof(nnMatrix)); |
| 23 | net->biases = calloc(num_layers, sizeof(nnMatrix)); | 24 | net->biases = calloc(num_layers, sizeof(nnMatrix)); |
| 24 | net->activations = calloc(num_layers, sizeof(nnActivation)); | 25 | net->activations = calloc(num_layers, sizeof(nnActivation)); |
| 25 | if ( (net->weights == 0) || (net->biases == 0) || (net->activations == 0) ) { | 26 | if ((net->weights == 0) || (net->biases == 0) || (net->activations == 0)) { |
| 26 | nnDeleteNet(&net); | 27 | nnDeleteNet(&net); |
| 27 | return 0; | 28 | return 0; |
| 28 | } | 29 | } |
| @@ -30,15 +31,15 @@ nnNeuralNetwork* nnMakeNet(int num_layers, const int* layer_sizes, const nnActiv | |||
| 30 | for (int l = 0; l < num_layers; ++l) { | 31 | for (int l = 0; l < num_layers; ++l) { |
| 31 | // layer_sizes = { input layer size, first hidden layer size, ...} | 32 | // layer_sizes = { input layer size, first hidden layer size, ...} |
| 32 | const int layer_input_size = layer_sizes[l]; | 33 | const int layer_input_size = layer_sizes[l]; |
| 33 | const int layer_output_size = layer_sizes[l+1]; | 34 | const int layer_output_size = layer_sizes[l + 1]; |
| 34 | 35 | ||
| 35 | // We store the transpose of the weight matrix as written in textbooks. | 36 | // We store the transpose of the weight matrix as written in textbooks. |
| 36 | // Our vectors are row vectors and the matrices row-major. | 37 | // Our vectors are row vectors and the matrices row-major. |
| 37 | const int rows = layer_input_size; | 38 | const int rows = layer_input_size; |
| 38 | const int cols = layer_output_size; | 39 | const int cols = layer_output_size; |
| 39 | 40 | ||
| 40 | net->weights[l] = nnMatrixMake(rows, cols); | 41 | net->weights[l] = nnMatrixMake(rows, cols); |
| 41 | net->biases[l] = nnMatrixMake(1, cols); | 42 | net->biases[l] = nnMatrixMake(1, cols); |
| 42 | net->activations[l] = activations[l]; | 43 | net->activations[l] = activations[l]; |
| 43 | } | 44 | } |
| 44 | 45 | ||
| @@ -46,7 +47,7 @@ nnNeuralNetwork* nnMakeNet(int num_layers, const int* layer_sizes, const nnActiv | |||
| 46 | } | 47 | } |
| 47 | 48 | ||
| 48 | void nnDeleteNet(nnNeuralNetwork** net) { | 49 | void nnDeleteNet(nnNeuralNetwork** net) { |
| 49 | if ( (!net) || (!(*net)) ) { | 50 | if ((!net) || (!(*net))) { |
| 50 | return; | 51 | return; |
| 51 | } | 52 | } |
| 52 | if ((*net)->weights != 0) { | 53 | if ((*net)->weights != 0) { |
| @@ -77,7 +78,7 @@ void nnSetWeights(nnNeuralNetwork* net, const R* weights) { | |||
| 77 | 78 | ||
| 78 | for (int l = 0; l < net->num_layers; ++l) { | 79 | for (int l = 0; l < net->num_layers; ++l) { |
| 79 | nnMatrix* layer_weights = &net->weights[l]; | 80 | nnMatrix* layer_weights = &net->weights[l]; |
| 80 | R* layer_values = layer_weights->values; | 81 | R* layer_values = layer_weights->values; |
| 81 | 82 | ||
| 82 | for (int j = 0; j < layer_weights->rows * layer_weights->cols; ++j) { | 83 | for (int j = 0; j < layer_weights->rows * layer_weights->cols; ++j) { |
| 83 | *layer_values++ = *weights++; | 84 | *layer_values++ = *weights++; |
| @@ -91,7 +92,7 @@ void nnSetBiases(nnNeuralNetwork* net, const R* biases) { | |||
| 91 | 92 | ||
| 92 | for (int l = 0; l < net->num_layers; ++l) { | 93 | for (int l = 0; l < net->num_layers; ++l) { |
| 93 | nnMatrix* layer_biases = &net->biases[l]; | 94 | nnMatrix* layer_biases = &net->biases[l]; |
| 94 | R* layer_values = layer_biases->values; | 95 | R* layer_values = layer_biases->values; |
| 95 | 96 | ||
| 96 | for (int j = 0; j < layer_biases->rows * layer_biases->cols; ++j) { | 97 | for (int j = 0; j < layer_biases->rows * layer_biases->cols; ++j) { |
| 97 | *layer_values++ = *biases++; | 98 | *layer_values++ = *biases++; |
| @@ -99,7 +100,8 @@ void nnSetBiases(nnNeuralNetwork* net, const R* biases) { | |||
| 99 | } | 100 | } |
| 100 | } | 101 | } |
| 101 | 102 | ||
| 102 | void nnQuery(const nnNeuralNetwork* net, nnQueryObject* query, const nnMatrix* input) { | 103 | void nnQuery( |
| 104 | const nnNeuralNetwork* net, nnQueryObject* query, const nnMatrix* input) { | ||
| 103 | assert(net); | 105 | assert(net); |
| 104 | assert(query); | 106 | assert(query); |
| 105 | assert(input); | 107 | assert(input); |
| @@ -123,29 +125,34 @@ void nnQuery(const nnNeuralNetwork* net, nnQueryObject* query, const nnMatrix* i | |||
| 123 | // We could also rewrite the original Mul function to go row x row, | 125 | // We could also rewrite the original Mul function to go row x row, |
| 124 | // decomposing the multiplication. Preserving the original meaning of Mul | 126 | // decomposing the multiplication. Preserving the original meaning of Mul |
| 125 | // makes everything clearer. | 127 | // makes everything clearer. |
| 126 | nnMatrix output_vector = nnMatrixBorrowRows(&query->layer_outputs[l], i, 1); | 128 | nnMatrix output_vector = |
| 129 | nnMatrixBorrowRows(&query->layer_outputs[l], i, 1); | ||
| 127 | nnMatrixMul(&input_vector, layer_weights, &output_vector); | 130 | nnMatrixMul(&input_vector, layer_weights, &output_vector); |
| 128 | nnMatrixAddRow(&output_vector, layer_biases, &output_vector); | 131 | nnMatrixAddRow(&output_vector, layer_biases, &output_vector); |
| 129 | 132 | ||
| 130 | switch (net->activations[l]) { | 133 | switch (net->activations[l]) { |
| 131 | case nnIdentity: | 134 | case nnIdentity: |
| 132 | break; // Nothing to do for the identity function. | 135 | break; // Nothing to do for the identity function. |
| 133 | case nnSigmoid: | 136 | case nnSigmoid: |
| 134 | sigmoid_array(output_vector.values, output_vector.values, output_vector.cols); | 137 | sigmoid_array( |
| 135 | break; | 138 | output_vector.values, output_vector.values, output_vector.cols); |
| 136 | case nnRelu: | 139 | break; |
| 137 | relu_array(output_vector.values, output_vector.values, output_vector.cols); | 140 | case nnRelu: |
| 138 | break; | 141 | relu_array( |
| 139 | default: | 142 | output_vector.values, output_vector.values, output_vector.cols); |
| 140 | assert(0); | 143 | break; |
| 144 | default: | ||
| 145 | assert(0); | ||
| 141 | } | 146 | } |
| 142 | 147 | ||
| 143 | input_vector = output_vector; // Borrow. | 148 | input_vector = output_vector; // Borrow. |
| 144 | } | 149 | } |
| 145 | } | 150 | } |
| 146 | } | 151 | } |
| 147 | 152 | ||
| 148 | void nnQueryArray(const nnNeuralNetwork* net, nnQueryObject* query, const R* input, R* output) { | 153 | void nnQueryArray( |
| 154 | const nnNeuralNetwork* net, nnQueryObject* query, const R* input, | ||
| 155 | R* output) { | ||
| 149 | assert(net); | 156 | assert(net); |
| 150 | assert(query); | 157 | assert(query); |
| 151 | assert(input); | 158 | assert(input); |
| @@ -177,9 +184,9 @@ nnQueryObject* nnMakeQueryObject(const nnNeuralNetwork* net, int num_inputs) { | |||
| 177 | return 0; | 184 | return 0; |
| 178 | } | 185 | } |
| 179 | for (int l = 0; l < net->num_layers; ++l) { | 186 | for (int l = 0; l < net->num_layers; ++l) { |
| 180 | const nnMatrix* layer_weights = &net->weights[l]; | 187 | const nnMatrix* layer_weights = &net->weights[l]; |
| 181 | const int layer_output_size = nnLayerOutputSize(layer_weights); | 188 | const int layer_output_size = nnLayerOutputSize(layer_weights); |
| 182 | query->layer_outputs[l] = nnMatrixMake(num_inputs, layer_output_size); | 189 | query->layer_outputs[l] = nnMatrixMake(num_inputs, layer_output_size); |
| 183 | } | 190 | } |
| 184 | query->network_outputs = &query->layer_outputs[net->num_layers - 1]; | 191 | query->network_outputs = &query->layer_outputs[net->num_layers - 1]; |
| 185 | 192 | ||
| @@ -187,7 +194,7 @@ nnQueryObject* nnMakeQueryObject(const nnNeuralNetwork* net, int num_inputs) { | |||
| 187 | } | 194 | } |
| 188 | 195 | ||
| 189 | void nnDeleteQueryObject(nnQueryObject** query) { | 196 | void nnDeleteQueryObject(nnQueryObject** query) { |
| 190 | if ( (!query) || (!(*query)) ) { | 197 | if ((!query) || (!(*query))) { |
| 191 | return; | 198 | return; |
| 192 | } | 199 | } |
| 193 | if ((*query)->layer_outputs != 0) { | 200 | if ((*query)->layer_outputs != 0) { |
