// NOTE(review): every code line in this chunk begins with a stale numeral
// ("36", "37", ...) left over from a line-numbered extraction; they are not
// valid C++ tokens and must be stripped before this compiles. Kept verbatim.
// Short aliases for the tiny_dnn layer types used in the topology below.
36 using conv = tiny_dnn::convolutional_layer;
37 using pool = tiny_dnn::max_pooling_layer;
38 using fc = tiny_dnn::fully_connected_layer;
39 using relu = tiny_dnn::relu_layer;
40 using softmax = tiny_dnn::softmax_layer;
// Network width hyper-parameters.
42 const size_t n_fmaps = 32;  // feature maps in the first two conv stages
43 const size_t n_fmaps2 = 64;  // feature maps in the third conv stage
44 const size_t n_fc = 64;  // hidden units in the fully-connected layer
// CIFAR-10 topology appended to the network pointed to by `net`
// (32x32 RGB input, 10 output classes): three stages of
// conv(5x5, same padding) -> 2x2 max-pool -> ReLU, halving the spatial
// size each stage (32 -> 16 -> 8 -> 4), then fc -> fc -> softmax.
// Argument order follows tiny_dnn's (width, height, kernel, in_ch, out_ch)
// convention — presumably; confirm against the tiny_dnn headers in use.
46 (*net) << conv(32, 32, 5, 3, n_fmaps, tiny_dnn::padding::same)
47 << pool(32, 32, n_fmaps, 2)
48 << relu(16, 16, n_fmaps)
49 << conv(16, 16, 5, n_fmaps, n_fmaps, tiny_dnn::padding::same)
50 << pool(16, 16, n_fmaps, 2)
51 << relu(8, 8, n_fmaps)
52 << conv(8, 8, 5, n_fmaps, n_fmaps2, tiny_dnn::padding::same)
53 << pool(8, 8, n_fmaps2, 2)
54 << relu(4, 4, n_fmaps2)
55 << fc(4 * 4 * n_fmaps2, n_fc)
56 << fc(n_fc, 10) << softmax(10);
// Load the CIFAR-10 binary batches from `path` (declared outside this chunk).
// NOTE(review): stale leading numerals ("62", "64", ...) are extraction
// residue, not code; statements are also split mid-expression across lines.
62 LINFO(
"Load training data from directory " << path);
64 float learning_rate = 0.01F;  // base learning rate, scaled further below
67 std::vector<tiny_dnn::label_t> train_labels, test_labels;
68 std::vector<tiny_dnn::vec_t> train_images, test_images;
// CIFAR-10 ships its 50k training images as five 10k-image batch files;
// each parse_cifar10 call appends to the same train vectors.
// The -1.0/1.0 arguments presumably rescale pixel values into [-1, 1] and
// the two 0s are x/y padding — confirm against tiny_dnn's parse_cifar10.
69 for (
int i = 1; i <= 5; ++i)
70 tiny_dnn::parse_cifar10(path +
"/data_batch_" + std::to_string(i) +
".bin",
71 &train_images, &train_labels, -1.0, 1.0, 0, 0);
// The held-out 10k-image test batch, loaded with identical scaling/padding.
73 tiny_dnn::parse_cifar10(path +
"/test_batch.bin", &test_images, &test_labels, -1.0, 1.0, 0, 0);
75 LINFO(
"Start training...");
76 int const n_minibatch = 10;  // SGD minibatch size
77 int const n_train_epochs = 30;  // full passes over the training set
// Per-epoch progress callback: logs elapsed time (t is a timer declared
// outside this chunk) and the running validation score on the test set.
// NOTE(review): the jump in the stale numerals (85 -> 91 -> 96) means the
// closing `};` of both lambdas and any statements between them were elided
// from this extraction — the visible text is truncated, not complete code.
82 auto on_enumerate_epoch = [&](){
83 LINFO(t.elapsed() <<
"s elapsed.");
84 tiny_dnn::result res =
net->test(test_images, test_labels);
85 LINFO(res.num_success <<
"/" << res.num_total <<
" success/total validation score so far");
// Per-minibatch callback (body elided by the extraction, see NOTE above).
91 auto on_enumerate_minibatch = [&](){
// Adam optimizer; step size is scaled by sqrt(minibatch size) * base rate.
96 tiny_dnn::adam optimizer;
97 optimizer.alpha *=
static_cast<tiny_dnn::float_t
>(sqrt(n_minibatch) * learning_rate);
// Train with cross-entropy loss, invoking the callbacks above each
// minibatch / epoch.
98 net->train<tiny_dnn::cross_entropy>(optimizer, train_images, train_labels, n_minibatch, n_train_epochs,
99 on_enumerate_minibatch, on_enumerate_epoch);
101 LINFO(
"Training complete");
// Final evaluation on the test set, printing the per-class detail table.
104 net->test(test_images, test_labels).print_detail(std::cout);