mnist.c

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>

#include "include/mnist_file.h"
#include "include/neural_network.h"

#define STEPS 1000
#define BATCH_SIZE 100

/**
 * Downloaded from: http://yann.lecun.com/exdb/mnist/
 */
const char * train_images_file = "data/train-images-idx3-ubyte";
const char * train_labels_file = "data/train-labels-idx1-ubyte";
const char * test_images_file = "data/t10k-images-idx3-ubyte";
const char * test_labels_file = "data/t10k-labels-idx1-ubyte";

/**
 * Calculate the accuracy of a neural network's predictions on a dataset.
 */
float calculate_accuracy(mnist_dataset_t * dataset, neural_network_t * network)
{
    float activations[MNIST_LABELS], max_activation;
    int i, j, correct, predict;

    // Loop through the dataset
    for (i = 0, correct = 0; i < dataset->size; i++) {
        // Calculate the activations for each image using the neural network
        neural_network_hypothesis(&dataset->images[i], network, activations);

        // Set predict to the index of the greatest activation
        for (j = 0, predict = 0, max_activation = activations[0]; j < MNIST_LABELS; j++) {
            if (max_activation < activations[j]) {
                max_activation = activations[j];
                predict = j;
            }
        }

        // Increment the correct count if we predicted the right label
        if (predict == dataset->labels[i]) {
            correct++;
        }
    }

    // Return the fraction of images predicted correctly as the accuracy
    return ((float) correct) / ((float) dataset->size);
}

int main(int argc, char *argv[])
{
    mnist_dataset_t * train_dataset, * test_dataset;
    mnist_dataset_t batch;
    neural_network_t network;
    float loss, accuracy;
    int i, batches;

    // Read the datasets from the files
    train_dataset = mnist_get_dataset(train_images_file, train_labels_file);
    test_dataset = mnist_get_dataset(test_images_file, test_labels_file);

    // Initialise weights and biases with random values
    neural_network_random_weights(&network);

    // Calculate how many batches there are (so we know when to wrap around)
    batches = train_dataset->size / BATCH_SIZE;

    for (i = 0; i < STEPS; i++) {
        // Initialise a new batch
        mnist_batch(train_dataset, &batch, BATCH_SIZE, i % batches);

        // Run one step of gradient descent (learning rate 0.5) and calculate the loss
        loss = neural_network_training_step(&batch, &network, 0.5);

        // Calculate the accuracy using the whole test dataset
        accuracy = calculate_accuracy(test_dataset, &network);

        printf("Step %04d\tAverage Loss: %.2f\tAccuracy: %.3f\n", i, loss / batch.size, accuracy);
    }

    // Cleanup
    mnist_free_dataset(train_dataset);
    mnist_free_dataset(test_dataset);

    return 0;
}
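
/*
 * Build-and-run sketch. This is an assumption about the repository layout:
 * the helper sources are taken to be mnist_file.c and neural_network.c
 * (matching the headers included above); adjust the names and paths to
 * whatever the project actually provides.
 *
 *     cc -O2 -o mnist mnist.c mnist_file.c neural_network.c -lm
 *     ./mnist
 *
 * The four IDX files referenced above are expected, uncompressed, under
 * data/ relative to the working directory (the downloads are typically
 * gzipped, so they may need to be extracted first).
 */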