recurrent_network.py

'''
A Recurrent Neural Network (LSTM) implementation example using the TensorFlow library.
This example uses the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
Long Short Term Memory paper: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
import numpy as np
import tensorflow as tf
from tensorflow.models.rnn import rnn, rnn_cell

# Import MNIST data
import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

'''
To classify images with a recurrent neural network, we treat every image row as one step
in a sequence of pixels. Because the MNIST image shape is 28*28 px, each sample becomes
a sequence of 28 steps with 28 inputs per step.
'''
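# A minimal shape check for this row-as-sequence view (NumPy only; the dummy
# array below is an illustration, not part of the original example):
_demo = np.zeros((1, 784))  # one flattened 28*28 image
assert _demo.reshape((1, 28, 28)).shape == (1, 28, 28)  # 28 steps of 28 pixels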
# Parameters
learning_rate = 0.001
training_iters = 100000  # total number of training examples to consume (step * batch_size)
batch_size = 128
display_step = 10

# Network Parameters
n_input = 28  # MNIST data input (img shape: 28*28)
n_steps = 28  # timesteps
n_hidden = 128  # hidden layer num of features
n_classes = 10  # MNIST total classes (0-9 digits)
# tf Graph input
x = tf.placeholder("float", [None, n_steps, n_input])
istate = tf.placeholder("float", [None, 2*n_hidden])  # concatenated LSTM cell state & hidden state => 2x n_hidden
y = tf.placeholder("float", [None, n_classes])

# Define weights
weights = {
    'hidden': tf.Variable(tf.random_normal([n_input, n_hidden])),  # Hidden layer weights
    'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))
}
biases = {
    'hidden': tf.Variable(tf.random_normal([n_hidden])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
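# 'hidden' projects each 28-pixel row into the 128-dim LSTM input space;
# 'out' maps the final 128-dim LSTM output to the 10 class logits.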
def RNN(_X, _istate, _weights, _biases):
    # input shape: (batch_size, n_steps, n_input)
    _X = tf.transpose(_X, [1, 0, 2])  # permute n_steps and batch_size
    # Reshape to prepare input to hidden activation
    _X = tf.reshape(_X, [-1, n_input])  # (n_steps*batch_size, n_input)
    # Linear activation
    _X = tf.matmul(_X, _weights['hidden']) + _biases['hidden']

    # Define an LSTM cell with TensorFlow
    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
    # Split data because the rnn cell needs a list of inputs for the RNN inner loop
    _X = tf.split(0, n_steps, _X)  # n_steps * (batch_size, n_hidden)

    # Get lstm cell output
    outputs, states = rnn.rnn(lstm_cell, _X, initial_state=_istate)

    # Linear activation: use the inner loop's last output
    return tf.matmul(outputs[-1], _weights['out']) + _biases['out']
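# Shape walk-through for RNN(), with B = batch_size:
#   _X in:        (B, 28, 28)  -> transpose -> (28, B, 28)
#   reshape:      (28*B, 28)   -> hidden matmul -> (28*B, 128)
#   split:        a list of 28 tensors, each (B, 128)
#   outputs[-1]:  (B, 128)     -> output matmul -> (B, 10) logits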
pred = RNN(x, istate, weights, biases)

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))  # Softmax loss
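# Note: in this TF generation the positional order above is (logits, labels);
# later releases require the keyword form
# softmax_cross_entropy_with_logits(labels=..., logits=...).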
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)  # Adam Optimizer

# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initializing the variables
init = tf.initialize_all_variables()
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    step = 1
    # Keep training until we reach the max number of iterations
    while step * batch_size < training_iters:
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        # Reshape data to get 28 sequences of 28 elements
        batch_xs = batch_xs.reshape((batch_size, n_steps, n_input))
        # Fit training using batch data
        sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys,
                                       istate: np.zeros((batch_size, 2*n_hidden))})
        if step % display_step == 0:
            # Calculate batch accuracy
            acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys,
                                                istate: np.zeros((batch_size, 2*n_hidden))})
            # Calculate batch loss
            loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys,
                                             istate: np.zeros((batch_size, 2*n_hidden))})
            print "Iter " + str(step*batch_size) + ", Minibatch Loss= " + "{:.6f}".format(loss) + \
                  ", Training Accuracy= " + "{:.5f}".format(acc)
        step += 1
    print "Optimization Finished!"
    # Calculate accuracy for 256 MNIST test images
    test_len = 256
    test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
    test_label = mnist.test.labels[:test_len]
    print "Testing Accuracy:", sess.run(accuracy, feed_dict={x: test_data, y: test_label,
                                                             istate: np.zeros((test_len, 2*n_hidden))})
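
# --------------------------------------------------------------------------
# The tensorflow.models.rnn module was removed in later TensorFlow releases.
# For reference only, a minimal sketch of the same row-wise LSTM classifier
# against the TF 1.x API: tf.nn.dynamic_rnn unrolls over the time axis itself,
# so the transpose/reshape/split steps above disappear. This function targets
# TF 1.x, is illustrative, and is never called in this script.
def rnn_tf1_sketch():
    tf.reset_default_graph()
    x1 = tf.placeholder(tf.float32, [None, n_steps, n_input])
    y1 = tf.placeholder(tf.float32, [None, n_classes])
    cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
    # outputs: (batch_size, n_steps, n_hidden); zero initial state by default
    outputs, _ = tf.nn.dynamic_rnn(cell, x1, dtype=tf.float32)
    logits = tf.layers.dense(outputs[:, -1, :], n_classes)  # last time step only
    return x1, y1, logits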