convolutional_network.py

'''
A Convolutional Network implementation example using the TensorFlow library.
This example uses the MNIST database of handwritten digits
(http://yann.lecun.com/exdb/mnist/).

Note: this script targets the pre-1.0 TensorFlow API (Python 2 print
statements, tf.initialize_all_variables, positional logits/labels).

Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
import tensorflow as tf

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
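
# Quick sanity check (added for illustration, not in the original script):
# with one_hot=True each label is a 10-dimensional indicator vector and each
# image is flattened to 28*28 = 784 floats.
assert mnist.train.images.shape[1] == 784
assert mnist.train.labels.shape[1] == 10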

# Parameters
learning_rate = 0.001
training_iters = 200000
batch_size = 128
display_step = 10
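# Note: training_iters counts training examples seen, not optimizer steps;
# the training loop below therefore runs roughly
# training_iters / batch_size (about 1562) optimization steps.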

# Network Parameters
n_input = 784  # MNIST data input (img shape: 28*28)
n_classes = 10  # MNIST total classes (0-9 digits)
dropout = 0.75  # Dropout, probability to keep units

# tf Graph input
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
keep_prob = tf.placeholder(tf.float32)  # dropout (keep probability)

# Create some wrappers for simplicity
def conv2d(x, W, b, strides=1):
    # Conv2D wrapper, with bias and relu activation
    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
    x = tf.nn.bias_add(x, b)
    return tf.nn.relu(x)
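
# Note on conv2d above: strides follow TensorFlow's NHWC layout
# [batch, height, width, channels], and 'SAME' padding pads the input so a
# stride-1 convolution preserves the spatial size (28x28 stays 28x28).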

def maxpool2d(x, k=2):
    # MaxPool2D wrapper
    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
                          padding='SAME')
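
# With the default k=2, each max-pool halves the spatial dimensions, so the
# two pooling stages below take 28x28 -> 14x14 -> 7x7.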

# Create model
def conv_net(x, weights, biases, dropout):
    # Reshape input picture
    x = tf.reshape(x, shape=[-1, 28, 28, 1])

    # Convolution Layer
    conv1 = conv2d(x, weights['wc1'], biases['bc1'])
    # Max Pooling (down-sampling)
    conv1 = maxpool2d(conv1, k=2)

    # Convolution Layer
    conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
    # Max Pooling (down-sampling)
    conv2 = maxpool2d(conv2, k=2)

    # Fully connected layer
    # Reshape conv2 output to fit fully connected layer input
    fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])
    fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
    fc1 = tf.nn.relu(fc1)
    # Apply Dropout
    fc1 = tf.nn.dropout(fc1, dropout)

    # Output, class prediction
    out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
    return out
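
# Shape trace for a batch of N images through conv_net (for reference):
#   input   [N, 28, 28, 1]
#   conv1   [N, 28, 28, 32] -> maxpool -> [N, 14, 14, 32]
#   conv2   [N, 14, 14, 64] -> maxpool -> [N, 7, 7, 64]
#   fc1     [N, 1024]       -> out     -> [N, 10]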

# Store layers weight & bias
weights = {
    # 5x5 conv, 1 input, 32 outputs
    'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
    # 5x5 conv, 32 inputs, 64 outputs
    'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
    # fully connected, 7*7*64 inputs, 1024 outputs
    'wd1': tf.Variable(tf.random_normal([7*7*64, 1024])),
    # 1024 inputs, 10 outputs (class prediction)
    'out': tf.Variable(tf.random_normal([1024, n_classes]))
}
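
# Sanity check (illustrative, not in the original script): the first fully
# connected layer expects the flattened 7x7x64 conv2 output, i.e. 3136
# input features.
assert weights['wd1'].get_shape().as_list()[0] == 7 * 7 * 64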

biases = {
    'bc1': tf.Variable(tf.random_normal([32])),
    'bc2': tf.Variable(tf.random_normal([64])),
    'bd1': tf.Variable(tf.random_normal([1024])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}

# Construct model
pred = conv_net(x, weights, biases, keep_prob)

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
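
# Note: softmax_cross_entropy_with_logits expects unscaled logits (conv_net
# applies no softmax); the op computes the softmax internally in a
# numerically stable way.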

# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
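
# tf.argmax(pred, 1) is the predicted class per example; comparing it with
# the argmax of the one-hot label gives a boolean vector whose mean is the
# batch accuracy.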

# Initializing the variables
init = tf.initialize_all_variables()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    step = 1
    # Keep training until reach max iterations
    while step * batch_size < training_iters:
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Run optimization op (backprop)
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,
                                       keep_prob: dropout})
        if step % display_step == 0:
            # Calculate batch loss and accuracy
            loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x,
                                                              y: batch_y,
                                                              keep_prob: 1.})
            print "Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
                  "{:.6f}".format(loss) + ", Training Accuracy= " + \
                  "{:.5f}".format(acc)
        step += 1
    print "Optimization Finished!"

    # Calculate accuracy for 256 mnist test images
    print "Testing Accuracy:", \
        sess.run(accuracy, feed_dict={x: mnist.test.images[:256],
                                      y: mnist.test.labels[:256],
                                      keep_prob: 1.})
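
    # Illustrative extra (not in the original script): predict the class of
    # a single test image with dropout disabled; the argmax over the logits
    # is the predicted digit. Only x and keep_prob need to be fed here.
    single_image = mnist.test.images[:1]
    predicted = sess.run(tf.argmax(pred, 1),
                         feed_dict={x: single_image, keep_prob: 1.})
    print "Example prediction for first test image:", predicted[0]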