1 #!/usr/bin/env dub
2 /+
3 dub.json:
4 {
5     "name": "cifar10",
6     "dependencies": {
7         "dopt": {
8             "path": "../"
9         }
10     }
11 }
12 +/
13 module cifar10;
14 
15 import std.algorithm;
16 import std.array;
17 import std.file;
18 import std.range;
19 import std.stdio;
20 import std.typecons;
21 
/**
 * Loads the CIFAR-10 dataset from the binary batch files in `path`.
 *
 * Each batch file holds 10,000 records; a record is 1 label byte followed
 * by 32x32x3 = 3,072 pixel bytes. Pixels are normalised to roughly
 * zero-mean via (x - 128) / 48, and labels are one-hot encoded.
 *
 * Params:
 *     path = directory containing data_batch_{1..5}.bin and test_batch.bin
 *
 * Returns: a named tuple (trainFeatures, trainLabels, testFeatures, testLabels)
 *          with 50,000 training and 10,000 test examples.
 *
 * Throws: Exception if a batch file is missing or has the wrong size.
 */
auto loadCIFAR10(string path)
{
	import std.exception : enforce;
	import std.format : format;

	enum pixelsPerImage = 32 * 32 * 3;
	enum recordSize = pixelsPerImage + 1;   // label byte + pixel bytes
	enum recordsPerBatch = 10_000;
	enum numClasses = 10;

	auto batches = ["data_batch_1.bin",
		 			"data_batch_2.bin",
					"data_batch_3.bin",
					"data_batch_4.bin",
					"data_batch_5.bin",
					"test_batch.bin"].map!(x => path ~ "/" ~ x).array();

	alias T = float;
	T[][] features;
	T[][] labels;

	foreach(b; batches)
	{
		ubyte[] raw = cast(ubyte[])read(b);

		// Fail fast on a truncated or corrupt batch file instead of
		// slicing out of bounds partway through the loop below.
		enforce(raw.length == recordsPerBatch * recordSize,
				format("%s: expected %s bytes, got %s",
					   b, recordsPerBatch * recordSize, raw.length));

		for(size_t i = 0; i < recordsPerBatch; i++)
		{
			// Normalise raw pixel bytes to an approximately standardised range.
			auto f = raw[1 .. recordSize]
				.map!(x => (cast(T)x - 128.0f) / 48.0f)
				.array();

			// One-hot encode the class label stored in the record's first byte.
			auto ls = new T[numClasses];
			ls[] = 0;
			ls[raw[0]] = 1.0f;
			labels ~= ls;
			features ~= f;

			raw = raw[recordSize .. $];
		}
	}

	// The first five batches (50,000 images) are the training set;
	// the final batch is the 10,000-image test set.
	return tuple!("trainFeatures", "trainLabels", "testFeatures", "testLabels")
				 (features[0 .. 50_000], labels[0 .. 50_000], features[50_000 .. $], labels[50_000 .. $]);
}
58 	
59 import dopt.core;
60 import dopt.nnet;
61 import dopt.online;
62 
/**
 * Builds one VGG-style block: two 3x3 same-padded convolutions, each
 * followed by a ReLU, then a 2x2 max-pool that halves the spatial size.
 *
 * Params:
 *     input    = the layer this block is appended to
 *     channels = number of feature maps produced by both convolutions
 *
 * Returns: the max-pool layer terminating the block.
 */
Layer vggBlock(Layer input, size_t channels)
{
    // [1, 1] padding keeps the spatial dimensions unchanged through
    // each 3x3 convolution; only the pool reduces resolution.
    auto conv1 = input.conv2D(channels, [3, 3], new Conv2DOptions().padding([1, 1]));
    auto act1 = conv1.relu();
    auto conv2 = act1.conv2D(channels, [3, 3], new Conv2DOptions().padding([1, 1]));
    auto act2 = conv2.relu();

    return act2.maxPool([2, 2]);
}
72 
/**
 * Trains a VGG-style convolutional network on CIFAR-10 and prints the
 * final test-set accuracy.
 *
 * Usage: pass the directory containing the CIFAR-10 binary batch files
 * as the first command-line argument.
 */
void main(string[] args)
{
    auto data = loadCIFAR10(args[1]);

    // Symbolic placeholders for one minibatch: 100 RGB 32x32 images and
    // their 10-way one-hot labels.
    auto features = float32([100, 3, 32, 32]);
    auto labels = float32([100, 10]);

    // Five conv blocks with doubling widths, then three dense layers,
    // ending in a softmax over the 10 classes.
    auto preds = dataSource(features)
                .vggBlock(64)
                .vggBlock(128)
                .vggBlock(256)
                .vggBlock(512)
                .vggBlock(512)
                .dense(512)
                .relu()
                .dense(512)
                .relu()
                .dense(10)
                .softmax();

    auto network = new DAGNetwork([features], [preds]);

    // Cross-entropy classification loss plus the network's parameter
    // regularisation terms.
    auto lossSym = crossEntropy(preds.trainOutput, labels) + network.paramLoss;

    auto learningRate = float32([], [0.0001f]);
    auto updater = adam([lossSym], network.params, null, learningRate);

    foreach(e; 0 .. 120)
    {
        float totloss = 0;
        float tot = 0;

        // Drop the learning rate by 10x for the final 20 epochs.
        if(e == 100)
        {
            learningRate.value.as!float[0] = 0.00001f;
        }

        // 50,000 training images in minibatches of 100 -> 500 updates/epoch.
        foreach(fs, ls; zip(data.trainFeatures.chunks(100), data.trainLabels.chunks(100)))
        {
            auto loss = updater([
                features: Buffer(fs.joiner().array()),
                labels: Buffer(ls.joiner().array())
            ]);

            totloss += loss[0].as!float[0];
            tot++;

            write("  ", tot, "/500    \r");
            stdout.flush();
        }

        writeln();
        writeln(e, ": ", totloss / tot);
    }

    // Evaluate accuracy on the 10,000-image test set.
    int correct;
    int total;

    foreach(fs, ls; zip(data.testFeatures.chunks(100), data.testLabels.chunks(100)))
    {
        auto pred = network.outputs[0].evaluate([
            features: Buffer(fs.joiner().array())
        ]).as!float;

        // Compare the argmax of each 10-way prediction with the
        // argmax of the corresponding one-hot target.
        foreach(p, t; zip(pred.chunks(10), ls))
        {
            if(p.maxIndex == t.maxIndex)
            {
                correct++;
            }

            total++;
        }
    }

    writeln(correct / cast(float)total);
}