  1. """ Neural Network.
  2. A 2-Hidden Layers Fully Connected Neural Network (a.k.a Multilayer Perceptron)
  3. implementation with TensorFlow. This example is using the MNIST database
  4. of handwritten digits (http://yann.lecun.com/exdb/mnist/).
  5. Links:
  6. [MNIST Dataset](http://yann.lecun.com/exdb/mnist/).
  7. Author: Aymeric Damien
  8. Project: https://github.com/aymericdamien/TensorFlow-Examples/
  9. """
from __future__ import print_function

import tensorflow as tf

# Import MNIST data (downloaded to and cached under /tmp/data/ on first use)
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Parameters
learning_rate = 0.1
num_steps = 500
batch_size = 128
display_step = 100

# Network Parameters
n_hidden_1 = 256  # 1st layer number of neurons
n_hidden_2 = 256  # 2nd layer number of neurons
num_input = 784  # MNIST data input (img shape: 28*28)
num_classes = 10  # MNIST total classes (0-9 digits)

# tf Graph input
X = tf.placeholder("float", [None, num_input])
Y = tf.placeholder("float", [None, num_classes])
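# Each image is fed to X as a flattened 28*28 = 784 vector and each label to Y
# as a one-hot vector of length 10; the None dimension is the (variable) batch size.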

# Store layers weight & bias
weights = {
    'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, num_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([num_classes]))
}


# Create model
def neural_net(x):
    # Hidden fully connected layer with 256 neurons
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    # Hidden fully connected layer with 256 neurons
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    # Output fully connected layer with a neuron for each class
    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    return out_layer


# Construct model
logits = neural_net(X)
prediction = tf.nn.softmax(logits)
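# Note: softmax is applied here only to obtain class probabilities for
# evaluation; the loss below operates on the raw logits directly.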

# Define loss and optimizer
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    logits=logits, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)

# Evaluate model
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
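# tf.argmax(..., 1) recovers the class index from the one-hot labels and from
# the predicted probabilities; accuracy is the mean of per-example matches.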

# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()

# Start training
with tf.Session() as sess:

    # Run the initializer
    sess.run(init)

    for step in range(1, num_steps + 1):
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Run optimization op (backprop)
        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
        if step % display_step == 0 or step == 1:
            # Calculate batch loss and accuracy
            loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,
                                                                 Y: batch_y})
            print("Step " + str(step) + ", Minibatch Loss= " +
                  "{:.4f}".format(loss) + ", Training Accuracy= " +
                  "{:.3f}".format(acc))

    print("Optimization Finished!")

    # Calculate accuracy for MNIST test images
    print("Testing Accuracy:",
          sess.run(accuracy, feed_dict={X: mnist.test.images,
                                        Y: mnist.test.labels}))
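
    # Optional sketch: reuse the `prediction` tensor defined above to look at
    # the class probabilities the trained network assigns to one test image.
    probs = sess.run(prediction, feed_dict={X: mnist.test.images[:1]})
    print("Predicted digit for the first test image:", probs.argmax())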