@@ -0,0 +1,137 @@
+""" Neural Network.
+
+A 2-Hidden-Layer Fully Connected Neural Network (a.k.a. Multilayer Perceptron)
+implementation with TensorFlow. This example uses the MNIST database
+of handwritten digits (http://yann.lecun.com/exdb/mnist/).
+
+This example uses TensorFlow layers; see the 'neural_network_raw' example for
+a raw implementation with variables.
+
+Links:
+    [MNIST Dataset](http://yann.lecun.com/exdb/mnist/).
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+"""
+from __future__ import print_function
+
+import tensorflow as tf
+import tensorflow.contrib.eager as tfe
+
+# Set Eager API
+tfe.enable_eager_execution()
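+# Note: eager execution has to be enabled once, at program startup, before any
+# other TensorFlow operations are run.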
+
+# Import MNIST data
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)
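+# With one_hot=False the labels stay as integer class indices (0-9), the format
+# expected by sparse_softmax_cross_entropy_with_logits below.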
+
+# Parameters
+learning_rate = 0.001
+num_steps = 1000
+batch_size = 128
+display_step = 100
+
+# Network Parameters
+n_hidden_1 = 256 # 1st layer number of neurons
+n_hidden_2 = 256 # 2nd layer number of neurons
+num_input = 784 # MNIST data input (img shape: 28*28)
+num_classes = 10 # MNIST total classes (0-9 digits)
+
+# Using TF Dataset to split data into batches
+dataset = tf.data.Dataset.from_tensor_slices(
+    (mnist.train.images, mnist.train.labels)).batch(batch_size)
+dataset_iter = tfe.Iterator(dataset)
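+# tfe.Iterator yields one (images, labels) batch per next() call and raises
+# StopIteration once the training set has been consumed.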
+
+
+# Define the neural network. To use the eager API and tf.layers API together,
+# we must subclass tfe.Network as follows:
+class NeuralNet(tfe.Network):
+    def __init__(self):
+        # Define each layer
+        super(NeuralNet, self).__init__()
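+        # track_layer registers each layer with the Network so that the
+        # layer's variables belong to the network (e.g. via neural_net.variables)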
+        # Hidden fully connected layer with 256 neurons
+        self.layer1 = self.track_layer(
+            tf.layers.Dense(n_hidden_1, activation=tf.nn.relu))
+        # Hidden fully connected layer with 256 neurons
+        self.layer2 = self.track_layer(
+            tf.layers.Dense(n_hidden_2, activation=tf.nn.relu))
+        # Output fully connected layer with a neuron for each class
+        self.out_layer = self.track_layer(tf.layers.Dense(num_classes))
+
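+    # Forward pass; runs when the network instance is called like a function,
+    # e.g. neural_net(x_batch)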
+    def call(self, x):
+        x = self.layer1(x)
+        x = self.layer2(x)
+        return self.out_layer(x)
+
+
+neural_net = NeuralNet()
+
+
+# Cross-Entropy loss function
+def loss_fn(inference_fn, inputs, labels):
+    # Using sparse_softmax cross entropy
+    return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
+        logits=inference_fn(inputs), labels=labels))
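+# The logits passed in are the raw, unscaled network outputs; the softmax is
+# applied inside sparse_softmax_cross_entropy_with_logits, and the labels are
+# plain integer class indices.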
+
+
+# Calculate accuracy
+def accuracy_fn(inference_fn, inputs, labels):
+    prediction = tf.nn.softmax(inference_fn(inputs))
+    correct_pred = tf.equal(tf.argmax(prediction, 1), labels)
+    return tf.reduce_mean(tf.cast(correct_pred, tf.float32))
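+# The predicted class is the argmax of the softmax output; since softmax is
+# monotonic, this matches the argmax of the raw logits.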
+
+
+# Adam Optimizer
+optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
+# Compute gradients
+grad = tfe.implicit_gradients(loss_fn)
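+# implicit_gradients wraps loss_fn into a function that returns a list of
+# (gradient, variable) pairs for every trainable variable used while computing
+# the loss, ready to be passed to optimizer.apply_gradients below.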
+
+# Training
+average_loss = 0.
+average_acc = 0.
+for step in range(num_steps):
+
+    # Iterate through the dataset
+    try:
+        d = dataset_iter.next()
+    except StopIteration:
+        # The epoch is over; restart the iterator on the dataset
+        dataset_iter = tfe.Iterator(dataset)
+        d = dataset_iter.next()
+
+    # Images
+    x_batch = d[0]
+    # Labels
+    y_batch = tf.cast(d[1], dtype=tf.int64)
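+    # (sparse_softmax_cross_entropy_with_logits requires int32/int64 labels and
+    # tf.argmax in accuracy_fn returns int64, hence the cast)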
+
+    # Compute the batch loss
+    batch_loss = loss_fn(neural_net, x_batch, y_batch)
+    average_loss += batch_loss
+    # Compute the batch accuracy
+    batch_accuracy = accuracy_fn(neural_net, x_batch, y_batch)
+    average_acc += batch_accuracy
+
+    if step == 0:
+        # Display the initial cost, before optimizing
+        print("Initial loss= {:.9f}".format(average_loss))
+
+    # Update the variables with the gradients computed for this batch
+    optimizer.apply_gradients(grad(neural_net, x_batch, y_batch))
+
+    # Display info
+    if (step + 1) % display_step == 0 or step == 0:
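+        # Average the accumulated loss/accuracy over the last display_step
+        # batches (step 0 reports the single batch seen so far as-is)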
+        if step > 0:
+            average_loss /= display_step
+            average_acc /= display_step
+        print("Step:", '%04d' % (step + 1), " loss=",
+              "{:.9f}".format(average_loss), " accuracy=",
+              "{:.4f}".format(average_acc))
+        average_loss = 0.
+        average_acc = 0.
+
+# Evaluate model on the test image set
+testX = mnist.test.images
+testY = mnist.test.labels
+
+test_acc = accuracy_fn(neural_net, testX, testY)
+print("Test set accuracy: {:.4f}".format(test_acc))
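+# To predict a single image, one could take the argmax of the network output,
+# e.g.: tf.argmax(neural_net(testX[:1]), 1)  # -> predicted class index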