cluttered_mnist.py

# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from spatial_transformer import transformer
from scipy import ndimage
import numpy as np
import matplotlib.pyplot as plt
from tf_utils import conv2d, linear, weight_variable, bias_variable, dense_to_one_hot
# %% Load data
mnist_cluttered = np.load('./data/mnist_sequence1_sample_5distortions5x5.npz')
X_train = mnist_cluttered['X_train']
y_train = mnist_cluttered['y_train']
X_valid = mnist_cluttered['X_valid']
y_valid = mnist_cluttered['y_valid']
X_test = mnist_cluttered['X_test']
y_test = mnist_cluttered['y_test']

# %% Turn the dense labels into a one-hot representation
Y_train = dense_to_one_hot(y_train, n_classes=10)
Y_valid = dense_to_one_hot(y_valid, n_classes=10)
Y_test = dense_to_one_hot(y_test, n_classes=10)
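# For example, dense_to_one_hot maps a dense label such as 3 to a length-10
# one-hot row: [0, 0, 0, 1, 0, 0, 0, 0, 0, 0] (applied over all labels here).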
# %% Graph representation of our network

# %% Placeholders for 40x40 resolution
x = tf.placeholder(tf.float32, [None, 1600])
y = tf.placeholder(tf.float32, [None, 10])

# %% Since x is currently [batch, height*width], we need to reshape it to a
# 4-D tensor to use it in a convolutional graph. If one component of
# `shape` is the special value -1, the size of that dimension is
# computed so that the total size remains constant. Since we haven't
# defined the batch dimension's shape yet, we use -1 to denote that this
# dimension should not change size.
x_tensor = tf.reshape(x, [-1, 40, 40, 1])
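# For a batch of, say, 64 flattened images, x has shape (64, 1600) and
# x_tensor has shape (64, 40, 40, 1); the -1 lets TensorFlow infer the
# batch dimension at run time.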
# %% We'll set up a two-layer localisation network to figure out the
# parameters for an affine transformation of the input.

# %% Create variables for fully connected layer
W_fc_loc1 = weight_variable([1600, 20])
b_fc_loc1 = bias_variable([20])
W_fc_loc2 = weight_variable([20, 6])

# Use the identity transformation as the starting point
initial = np.array([[1., 0, 0], [0, 1., 0]])
initial = initial.astype('float32')
initial = initial.flatten()
b_fc_loc2 = tf.Variable(initial_value=initial, name='b_fc_loc2')
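# The six values in b_fc_loc2 are the flattened 2x3 affine matrix
#   [[a, b, t_x],
#    [c, d, t_y]]
# that the spatial transformer uses to sample the input (scale/rotation/shear
# plus translation). Starting from the identity means the transformer
# initially passes the input through unchanged.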
# %% Define the two-layer localisation network
h_fc_loc1 = tf.nn.tanh(tf.matmul(x, W_fc_loc1) + b_fc_loc1)

# %% We can add dropout for regularization, to reduce overfitting, like so:
keep_prob = tf.placeholder(tf.float32)
h_fc_loc1_drop = tf.nn.dropout(h_fc_loc1, keep_prob)

# %% Second layer
h_fc_loc2 = tf.nn.tanh(tf.matmul(h_fc_loc1_drop, W_fc_loc2) + b_fc_loc2)

# %% We'll create a spatial transformer module to identify discriminative
# patches
h_trans = transformer(x_tensor, h_fc_loc2, downsample_factor=1)
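# With downsample_factor=1 the transformer samples at the input's original
# resolution, so h_trans should again have shape [batch, 40, 40, 1], warped
# according to the per-example affine parameters h_fc_loc2.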
# %% We'll set up the first convolutional layer
# Weight matrix is [height x width x input_channels x output_channels]
filter_size = 3
n_filters_1 = 16
W_conv1 = weight_variable([filter_size, filter_size, 1, n_filters_1])

# %% Bias is [output_channels]
b_conv1 = bias_variable([n_filters_1])

# %% Now we can build a graph which does the first layer of convolution:
# we define our stride as batch x height x width x channels.
# Instead of pooling, we use strides of 2 and more layers
# with smaller filters.
h_conv1 = tf.nn.relu(
    tf.nn.conv2d(input=h_trans,
                 filter=W_conv1,
                 strides=[1, 2, 2, 1],
                 padding='SAME') +
    b_conv1)
# %% And just like the first layer, add additional layers to create
# a deep net
n_filters_2 = 16
W_conv2 = weight_variable([filter_size, filter_size, n_filters_1, n_filters_2])
b_conv2 = bias_variable([n_filters_2])
h_conv2 = tf.nn.relu(
    tf.nn.conv2d(input=h_conv1,
                 filter=W_conv2,
                 strides=[1, 2, 2, 1],
                 padding='SAME') +
    b_conv2)

# %% We'll now reshape so we can connect to a fully-connected layer:
h_conv2_flat = tf.reshape(h_conv2, [-1, 10 * 10 * n_filters_2])
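# The two stride-2, SAME-padded convolutions halve the spatial resolution
# twice (40 -> 20 -> 10), which is why the flattened feature map has
# 10 * 10 * n_filters_2 values per example.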
# %% Create a fully-connected layer:
n_fc = 1024
W_fc1 = weight_variable([10 * 10 * n_filters_2, n_fc])
b_fc1 = bias_variable([n_fc])
h_fc1 = tf.nn.relu(tf.matmul(h_conv2_flat, W_fc1) + b_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

# %% And finally our softmax layer:
W_fc2 = weight_variable([n_fc, 10])
b_fc2 = bias_variable([10])
y_pred = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

# %% Define loss/eval/training functions
cross_entropy = -tf.reduce_sum(y * tf.log(y_pred))
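# Note: this hand-rolled cross-entropy can produce NaNs if y_pred ever hits
# exactly 0. A more numerically stable sketch (assuming a TF version that
# accepts the labels=/logits= keywords) would keep the pre-softmax logits:
#   logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
#   cross_entropy = tf.reduce_sum(
#       tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
# The original formulation is kept here to match the rest of the example.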
opt = tf.train.AdamOptimizer()
optimizer = opt.minimize(cross_entropy)
grads = opt.compute_gradients(cross_entropy, [b_fc_loc2])

# %% Monitor accuracy
correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))

# %% We now create a new session and initialize the variables:
sess = tf.Session()
sess.run(tf.initialize_all_variables())
# %% We'll now train in minibatches and report accuracy and loss:
iter_per_epoch = 100
n_epochs = 500
train_size = 10000

indices = np.linspace(0, train_size - 1, iter_per_epoch)
indices = indices.astype('int')
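# indices holds iter_per_epoch roughly evenly spaced cut points into the
# training set; consecutive pairs are used below as minibatch boundaries,
# so each minibatch contains roughly 100 examples.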
for epoch_i in range(n_epochs):
    for iter_i in range(iter_per_epoch - 1):
        batch_xs = X_train[indices[iter_i]:indices[iter_i + 1]]
        batch_ys = Y_train[indices[iter_i]:indices[iter_i + 1]]

        if iter_i % 10 == 0:
            loss = sess.run(cross_entropy,
                            feed_dict={
                                x: batch_xs,
                                y: batch_ys,
                                keep_prob: 1.0
                            })
            print('Iteration: ' + str(iter_i) + ' Loss: ' + str(loss))

        sess.run(optimizer, feed_dict={
            x: batch_xs, y: batch_ys, keep_prob: 0.8})

    print('Accuracy: ' + str(sess.run(accuracy,
                                      feed_dict={
                                          x: X_valid,
                                          y: Y_valid,
                                          keep_prob: 1.0
                                      })))

    # theta = sess.run(h_fc_loc2, feed_dict={
    #     x: batch_xs, keep_prob: 1.0})
    # print(theta[0])