dynamic_rnn.py

'''
A Dynamic Recurrent Neural Network (LSTM) implementation example using
the TensorFlow library. This example uses a toy dataset to classify linear
sequences. The generated sequences have variable length.
Long Short Term Memory paper: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
import tensorflow as tf
import random


# ====================
#  TOY DATA GENERATOR
# ====================
class ToySequenceData(object):
    """ Generate sequences of data with dynamic length.
    This class generates samples for training:
    - Class 0: linear sequences (i.e. [0, 1, 2, 3, ...])
    - Class 1: random sequences (i.e. [1, 3, 10, 7, ...])
    NOTICE:
    We have to pad each sequence to reach 'max_seq_len' for TensorFlow
    consistency (we cannot feed a numpy array with inconsistent
    dimensions). The dynamic calculation will then be performed thanks to
    the 'seqlen' attribute that records every actual sequence length.
    """
    def __init__(self, n_samples=1000, max_seq_len=20, min_seq_len=3,
                 max_value=1000):
        self.data = []
        self.labels = []
        self.seqlen = []
        for i in range(n_samples):
            # Random sequence length (avoid shadowing the built-in 'len')
            seq_len = random.randint(min_seq_len, max_seq_len)
            # Monitor sequence length for TensorFlow dynamic calculation
            self.seqlen.append(seq_len)
            # Add a random or linear int sequence (50% prob)
            if random.random() < .5:
                # Generate a linear sequence
                rand_start = random.randint(0, max_value - seq_len)
                s = [[float(i) / max_value] for i in
                     range(rand_start, rand_start + seq_len)]
                # Pad sequence for dimension consistency
                s += [[0.] for i in range(max_seq_len - seq_len)]
                self.data.append(s)
                self.labels.append([1., 0.])
            else:
                # Generate a random sequence
                s = [[float(random.randint(0, max_value)) / max_value]
                     for i in range(seq_len)]
                # Pad sequence for dimension consistency
                s += [[0.] for i in range(max_seq_len - seq_len)]
                self.data.append(s)
                self.labels.append([0., 1.])
        self.batch_id = 0

    def next(self, batch_size):
        """ Return a batch of data. When dataset end is reached, start over.
        """
        if self.batch_id == len(self.data):
            self.batch_id = 0
        batch_data = (self.data[self.batch_id:min(self.batch_id +
                                                  batch_size, len(self.data))])
        batch_labels = (self.labels[self.batch_id:min(self.batch_id +
                                                      batch_size, len(self.data))])
        batch_seqlen = (self.seqlen[self.batch_id:min(self.batch_id +
                                                      batch_size, len(self.data))])
        self.batch_id = min(self.batch_id + batch_size, len(self.data))
        return batch_data, batch_labels, batch_seqlen
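
# Illustrative shapes (assumed batch_size=2, max_seq_len=20): next(2) returns
# batch_data as a [2, 20, 1] nested list (zero-padded past each true length),
# batch_labels as [2, 2] one-hot rows ([1, 0]=linear, [0, 1]=random), and
# batch_seqlen as the true lengths, e.g. [7, 13], later fed to 'seqlen'.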

# ==========
#   MODEL
# ==========

# Parameters
learning_rate = 0.01
training_iters = 1000000
batch_size = 128
display_step = 10

# Network Parameters
seq_max_len = 20  # Sequence max length
n_hidden = 64  # hidden layer num of features
n_classes = 2  # linear sequence or not

trainset = ToySequenceData(n_samples=1000, max_seq_len=seq_max_len)
testset = ToySequenceData(n_samples=500, max_seq_len=seq_max_len)

# tf Graph input
x = tf.placeholder("float", [None, seq_max_len, 1])
y = tf.placeholder("float", [None, n_classes])
# A placeholder for indicating each sequence length
seqlen = tf.placeholder(tf.int32, [None])

# Define weights
weights = {
    'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))
}
biases = {
    'out': tf.Variable(tf.random_normal([n_classes]))
}


def dynamicRNN(x, seqlen, weights, biases):
    # Prepare data shape to match `rnn` function requirements
    # Current data input shape: (batch_size, n_steps, n_input)
    # Required shape: 'n_steps' tensors list of shape (batch_size, n_input)

    # Permuting batch_size and n_steps
    x = tf.transpose(x, [1, 0, 2])
    # Reshaping to (n_steps*batch_size, n_input)
    x = tf.reshape(x, [-1, 1])
    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
    x = tf.split(0, seq_max_len, x)
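
    # Shape walkthrough (illustrative, assuming batch_size=128, n_input=1):
    # [128, 20, 1] --transpose--> [20, 128, 1] --reshape--> [20*128, 1]
    # --split--> a Python list of 20 tensors, each of shape [128, 1],
    # which is the input format expected by tf.nn.rnn in TF r0.x.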

    # Define an LSTM cell with TensorFlow
    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden)

    # Get LSTM cell output; providing 'sequence_length' will perform dynamic
    # calculation.
    outputs, states = tf.nn.rnn(lstm_cell, x, dtype=tf.float32,
                                sequence_length=seqlen)

    # When performing dynamic calculation, we must retrieve the last
    # dynamically computed output, i.e., if a sequence length is 10, we need
    # to retrieve the 10th output.
    # However TensorFlow doesn't support advanced indexing yet, so we build
    # a custom op that, for each sample in the batch, gets its length and
    # retrieves the corresponding relevant output.

    # 'outputs' is a list of outputs at every timestep; we pack them into a
    # Tensor and change the dimensions back to [batch_size, n_steps, n_hidden]
    outputs = tf.pack(outputs)
    outputs = tf.transpose(outputs, [1, 0, 2])

    # Hack to build the indexing and retrieve the right output.
    batch_size = tf.shape(outputs)[0]
    # Start indices for each sample
    index = tf.range(0, batch_size) * seq_max_len + (seqlen - 1)
    # Indexing
    outputs = tf.gather(tf.reshape(outputs, [-1, n_hidden]), index)
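
    # Worked example (assumed values): with seq_max_len=20 and seqlen=[10, 4]
    # for a batch of 2, index = [0*20 + 9, 1*20 + 3] = [9, 23], i.e. row 9
    # (10th output of sample 0) and row 23 (4th output of sample 1) of the
    # flattened [batch_size * n_steps, n_hidden] tensor.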

    # Linear activation, using the outputs computed above
    return tf.matmul(outputs, weights['out']) + biases['out']


pred = dynamicRNN(x, seqlen, weights, biases)

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initializing the variables
init = tf.initialize_all_variables()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    step = 1
    # Keep training until reach max iterations
    while step * batch_size < training_iters:
        batch_x, batch_y, batch_seqlen = trainset.next(batch_size)
        # Run optimization op (backprop)
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,
                                       seqlen: batch_seqlen})
        if step % display_step == 0:
            # Calculate batch accuracy
            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y,
                                                seqlen: batch_seqlen})
            # Calculate batch loss
            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y,
                                             seqlen: batch_seqlen})
            print "Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
                  "{:.6f}".format(loss) + ", Training Accuracy= " + \
                  "{:.5f}".format(acc)
        step += 1
    print "Optimization Finished!"

    # Calculate accuracy
    test_data = testset.data
    test_label = testset.labels
    test_seqlen = testset.seqlen
    print "Testing Accuracy:", \
        sess.run(accuracy, feed_dict={x: test_data, y: test_label,
                                      seqlen: test_seqlen})