tensorflow_dataset_api.py

  1. """ TensorFlow Dataset API.
  2. In this example, we will show how to load numpy array data into the new
  3. TensorFlow 'Dataset' API. The Dataset API implements an optimized data pipeline
  4. with queues, that make data processing and training faster (especially on GPU).
  5. Author: Aymeric Damien
  6. Project: https://github.com/aymericdamien/TensorFlow-Examples/
  7. """
from __future__ import print_function

import tensorflow as tf

# Import MNIST data (Numpy format)
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
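
# Note: the 'tensorflow.examples.tutorials' module was removed from later
# TensorFlow releases. A possible alternative (a sketch, not used below) is
# tf.keras.datasets:
#   (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# It returns uint8 images of shape (N, 28, 28) and integer labels, so they
# would still need to be flattened and one-hot encoded to match this script.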

# Parameters
learning_rate = 0.001
num_steps = 2000
batch_size = 128
display_step = 100

# Network Parameters
n_input = 784  # MNIST data input (img shape: 28*28)
n_classes = 10  # MNIST total classes (0-9 digits)
dropout = 0.75  # Dropout, probability to drop a unit

sess = tf.Session()

# Create a dataset tensor from the images and the labels
dataset = tf.data.Dataset.from_tensor_slices(
    (mnist.train.images, mnist.train.labels))
# Automatically refill the data queue when empty
dataset = dataset.repeat()
# Create batches of data
dataset = dataset.batch(batch_size)
# Prefetch data for faster consumption
dataset = dataset.prefetch(batch_size)
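
# A shuffled pipeline is often useful as well; a possible variant (a sketch,
# not used in this script) inserts a shuffle step before batching:
#   dataset = dataset.shuffle(buffer_size=10000)
# where buffer_size controls how many elements are sampled from at a time.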

# Create an iterator over the dataset
iterator = dataset.make_initializable_iterator()
# Initialize the iterator
sess.run(iterator.initializer)

# Neural Net Input (images, labels)
X, Y = iterator.get_next()
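
# For pipelines that never need re-initialization, a one-shot iterator is a
# possible alternative (sketch only, not used here); it removes the explicit
# initializer call:
#   iterator = dataset.make_one_shot_iterator()
#   X, Y = iterator.get_next()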

# -----------------------------------------------
# THIS IS A CLASSIC CNN (see examples, section 3)
# -----------------------------------------------
# Note that a few elements have changed (see how sess.run is used below).

# Create model
def conv_net(x, n_classes, dropout, reuse, is_training):
    # Define a scope for reusing the variables
    with tf.variable_scope('ConvNet', reuse=reuse):
        # MNIST data input is a 1-D vector of 784 features (28*28 pixels)
        # Reshape to match picture format [Height x Width x Channel]
        # Tensor input becomes 4-D: [Batch Size, Height, Width, Channel]
        x = tf.reshape(x, shape=[-1, 28, 28, 1])

        # Convolution Layer with 32 filters and a kernel size of 5
        conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)
        # Max Pooling (down-sampling) with strides of 2 and kernel size of 2
        conv1 = tf.layers.max_pooling2d(conv1, 2, 2)

        # Convolution Layer with 64 filters and a kernel size of 3
        conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu)
        # Max Pooling (down-sampling) with strides of 2 and kernel size of 2
        conv2 = tf.layers.max_pooling2d(conv2, 2, 2)

        # Flatten the data to a 1-D vector for the fully connected layer
        fc1 = tf.contrib.layers.flatten(conv2)

        # Fully connected layer (in contrib folder for now)
        fc1 = tf.layers.dense(fc1, 1024)
        # Apply Dropout (if is_training is False, dropout is not applied)
        fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training)

        # Output layer, class prediction
        out = tf.layers.dense(fc1, n_classes)
        # Because 'softmax_cross_entropy_with_logits' already applies softmax,
        # we only apply softmax to the testing network
        out = tf.nn.softmax(out) if not is_training else out

    return out

# Because Dropout has different behavior at training and prediction time, we
# need to create 2 distinct computation graphs that share the same weights.

# Create a graph for training
logits_train = conv_net(X, n_classes, dropout, reuse=False, is_training=True)
# Create another graph for testing that reuses the same weights, but has
# different behavior for 'dropout' (not applied).
logits_test = conv_net(X, n_classes, dropout, reuse=True, is_training=False)
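
# One possible sanity check (a sketch) that the two graphs share weights: list
# the trainable variables; only a single set of 'ConvNet/...' variables should
# appear, even though conv_net() was called twice:
#   print([v.name for v in tf.trainable_variables()])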

# Define loss and optimizer (with train logits, for dropout to take effect)
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    logits=logits_train, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)

# Evaluate model (with test logits, for dropout to be disabled)
correct_pred = tf.equal(tf.argmax(logits_test, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()

# Run the initializer
sess.run(init)

# Training cycle
for step in range(1, num_steps + 1):

    # Run optimization
    sess.run(train_op)

    if step % display_step == 0 or step == 1:
        # Calculate batch loss and accuracy
        # (note that this consumes a new batch of data)
        loss, acc = sess.run([loss_op, accuracy])
        print("Step " + str(step) + ", Minibatch Loss= " +
              "{:.4f}".format(loss) + ", Training Accuracy= " +
              "{:.3f}".format(acc))

print("Optimization Finished!")