#!/usr/bin/env dub
/+ dub.sdl:
dependency "dopt" path=".."
+/
module mnist;

/*
    This example trains a small convolutional network on the MNIST dataset of hand-written digits. The network is
    very small by today's standards, but MNIST is an easy enough dataset that this does not really matter.

    The MNIST dataset contains small monochrome images of hand-written digits, and the goal is to determine which
    digit each image contains.
*/
void main(string[] args)
{
    import std.algorithm : joiner, maxIndex;
    import std.array : array;
    import std.range : zip, chunks;
    import std.stdio : stderr, writeln;

    import dopt.core;
    import dopt.nnet;
    import dopt.online;

    if(args.length != 2)
    {
        stderr.writeln("Usage: mnist.d <data directory>");
        return;
    }

    //Load the MNIST dataset of hand-written digits. Download the binary files from http://yann.lecun.com/exdb/mnist/
    auto data = loadMNIST(args[1]);

    //Create the variable nodes required to pass data into the operation graph
    size_t batchSize = 100;
    auto features = float32([batchSize, 1, 28, 28]);
    auto labels = float32([batchSize, 10]);

    //Construct a small convolutional network
    auto preds = dataSource(features)
               .conv2D(32, [5, 5])
               .relu()
               .maxPool([2, 2])
               .conv2D(32, [5, 5])
               .relu()
               .maxPool([2, 2])
               .dense(10)
               .softmax();

    //Construct the DAGNetwork object that collates all the parameters and loss terms
    auto network = new DAGNetwork([features], [preds]);

    //Create a symbol for the training loss: cross entropy between the training-mode output and the one-hot labels,
    //plus any parameter regularisation terms collected by the network
    auto lossSym = crossEntropy(preds.trainOutput, labels) + network.paramLoss;

    //Create an optimiser that uses minibatches of labelled data to update the weights of the network.
    //The learning rate is kept in a variable so it can be reduced later during training.
    auto lr = float32([], [0.001f]);
    auto updater = adam([lossSym], network.params, network.paramProj, lr);

    //Compile a separate plan that evaluates the network's test-time output
    auto testPlan = compile([preds.output]);

    //Preallocate host buffers large enough for one minibatch of features and labels
    float[] fs = new float[features.volume];
    float[] ls = new float[labels.volume];

    //Iterate for 40 epochs of training
    foreach(e; 0 .. 40)
    {
        float totloss = 0;
        float tot = 0;

        //Reduce the learning rate after 30 epochs
        if(e == 30)
        {
            lr.value.set([0.0001f]);
        }

        data.train.restart();

        while(!data.train.finished())
        {
            //Get the next batch of training data (features into fs, one-hot labels into ls)
            data.train.getBatch([fs, ls]);

            //Perform one optimisation step on this minibatch
            auto loss = updater([
                features: buffer(fs),
                labels: buffer(ls)
            ]);

            totloss += loss[0].get!float[0];
            tot++;
        }

        //Write out the mean training loss for this epoch
        writeln(e, ": ", totloss / tot);
    }

    int correct;
    int total;

    while(!data.test.finished())
    {
        //Get the next batch of test data (features into fs, one-hot labels into ls)
        data.test.getBatch([fs, ls]);

        //Make some predictions for this minibatch
        auto pred = testPlan.execute([
            features: buffer(fs)
        ])[0].get!float;

        //Determine the accuracy of these predictions using the ground truth data
        foreach(p, t; zip(pred.chunks(10), ls.chunks(10)))
        {
            if(p.maxIndex == t.maxIndex)
            {
                correct++;
            }

            total++;
        }
    }

    //Write out the accuracy of the model on the test set
    writeln(correct / cast(float)total);
}
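/*
    Usage sketch (an assumption, not part of the original example): with the four MNIST binary files from
    http://yann.lecun.com/exdb/mnist/ decompressed into some directory, and this file kept inside the dopt
    repository (the dub.sdl header above depends on the parent directory), it should be runnable as a
    single-file dub package:

        dub run --single mnist.d -- <data directory>

    or, thanks to the shebang line at the top, directly as ./mnist.d <data directory> once the file is
    marked executable. The exact file names loadMNIST expects in <data directory> depend on the dopt version.
*/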