build_an_image_dataset.py

  1. """ Build an Image Dataset in TensorFlow.
  2. For this example, you need to make your own set of images (JPEG).
  3. We will show 2 different ways to build that dataset:
  4. - From a root folder, that will have a sub-folder containing images for each class
  5. ```
  6. ROOT_FOLDER
  7. |-------- SUBFOLDER (CLASS 0)
  8. | |
  9. | | ----- image1.jpg
  10. | | ----- image2.jpg
  11. | | ----- etc...
  12. |
  13. |-------- SUBFOLDER (CLASS 1)
  14. | |
  15. | | ----- image1.jpg
  16. | | ----- image2.jpg
  17. | | ----- etc...
  18. ```
  19. - From a plain text file, that will list all images with their class ID:
  20. ```
  21. /path/to/image/1.jpg CLASS_ID
  22. /path/to/image/2.jpg CLASS_ID
  23. /path/to/image/3.jpg CLASS_ID
  24. /path/to/image/4.jpg CLASS_ID
  25. etc...
  26. ```
  27. Below, there are some parameters that you need to change (Marked 'CHANGE HERE'),
  28. such as the dataset path.
  29. Author: Aymeric Damien
  30. Project: https://github.com/aymericdamien/TensorFlow-Examples/
  31. """
from __future__ import print_function

import os

import tensorflow as tf

# Dataset Parameters - CHANGE HERE
MODE = 'folder'  # or 'file', if you choose a plain text file (see above).
DATASET_PATH = '/path/to/dataset/'  # the dataset file or root folder path.

# Image Parameters
N_CLASSES = 2  # CHANGE HERE, total number of classes
IMG_HEIGHT = 64  # CHANGE HERE, the image height to be resized to
IMG_WIDTH = 64  # CHANGE HERE, the image width to be resized to
CHANNELS = 3  # The 3 color channels, change to 1 if grayscale
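
# Example (hypothetical values): to read labels from a text file instead of a
# folder tree, set MODE = 'file' and point DATASET_PATH at a file whose lines
# look like '/path/to/image/1.jpg 0' (image path and integer class ID,
# separated by a single space).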

# Reading the dataset
# 2 modes: 'file' or 'folder'
def read_images(dataset_path, mode, batch_size):
    imagepaths, labels = list(), list()
    if mode == 'file':
        # Read the dataset file: one "<image path> <class id>" pair per line
        with open(dataset_path) as f:
            data = f.read().splitlines()
        for d in data:
            imagepaths.append(d.split(' ')[0])
            labels.append(int(d.split(' ')[1]))
    elif mode == 'folder':
        # An ID will be assigned to each sub-folder, in alphabetical order
        label = 0
        # List the directory
        try:  # Python 2
            classes = sorted(os.walk(dataset_path).next()[1])
        except Exception:  # Python 3
            classes = sorted(os.walk(dataset_path).__next__()[1])
        # List each sub-directory (the classes)
        for c in classes:
            c_dir = os.path.join(dataset_path, c)
            try:  # Python 2
                walk = os.walk(c_dir).next()
            except Exception:  # Python 3
                walk = os.walk(c_dir).__next__()
            # Add each image to the training set
            for sample in walk[2]:
                # Only keep JPEG images
                if sample.endswith('.jpg') or sample.endswith('.jpeg'):
                    imagepaths.append(os.path.join(c_dir, sample))
                    labels.append(label)
            label += 1
    else:
        raise Exception("Unknown mode.")

    # Convert to Tensor
    imagepaths = tf.convert_to_tensor(imagepaths, dtype=tf.string)
    labels = tf.convert_to_tensor(labels, dtype=tf.int32)
    # Build a TF Queue, shuffle data
    image, label = tf.train.slice_input_producer([imagepaths, labels],
                                                 shuffle=True)

    # Read images from disk
    image = tf.read_file(image)
    image = tf.image.decode_jpeg(image, channels=CHANNELS)

    # Resize images to a common size
    image = tf.image.resize_images(image, [IMG_HEIGHT, IMG_WIDTH])

    # Normalize to the [-1, 1] range
    image = image * 1.0/127.5 - 1.0

    # Create batches
    X, Y = tf.train.batch([image, label], batch_size=batch_size,
                          capacity=batch_size * 8,
                          num_threads=4)

    return X, Y
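
# With the parameters above, read_images returns two batched tensors
# (the sub-folder names in this comment are hypothetical examples):
#   X: float32, shape [batch_size, IMG_HEIGHT, IMG_WIDTH, CHANNELS], values in [-1, 1]
#   Y: int32, shape [batch_size], class IDs assigned alphabetically in 'folder'
#      mode (e.g. 'cats/' -> 0, 'dogs/' -> 1)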

# -----------------------------------------------
# THIS IS A CLASSIC CNN (see examples, section 3)
# -----------------------------------------------
# Note that a few elements have changed (usage of queues).

# Parameters
learning_rate = 0.001
num_steps = 10000
batch_size = 128
display_step = 100

# Network Parameters
dropout = 0.75  # Dropout, probability to keep units

# Build the data input
X, Y = read_images(DATASET_PATH, MODE, batch_size)

# Create model
def conv_net(x, n_classes, dropout, reuse, is_training):
    # Define a scope for reusing the variables
    with tf.variable_scope('ConvNet', reuse=reuse):

        # Convolution Layer with 32 filters and a kernel size of 5
        conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)
        # Max Pooling (down-sampling) with strides of 2 and kernel size of 2
        conv1 = tf.layers.max_pooling2d(conv1, 2, 2)

        # Convolution Layer with 64 filters and a kernel size of 3
        conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu)
        # Max Pooling (down-sampling) with strides of 2 and kernel size of 2
        conv2 = tf.layers.max_pooling2d(conv2, 2, 2)

        # Flatten the data to a 1-D vector for the fully connected layer
        fc1 = tf.contrib.layers.flatten(conv2)

        # Fully connected layer
        fc1 = tf.layers.dense(fc1, 1024)
        # Apply Dropout (if is_training is False, dropout is not applied).
        # 'dropout' is a keep probability, while tf.layers.dropout expects a drop rate.
        fc1 = tf.layers.dropout(fc1, rate=1.0 - dropout, training=is_training)

        # Output layer, class prediction
        out = tf.layers.dense(fc1, n_classes)
        # Because 'sparse_softmax_cross_entropy_with_logits' already applies softmax,
        # we only apply softmax to the testing network
        out = tf.nn.softmax(out) if not is_training else out

    return out
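
# Shape walkthrough (a sketch, assuming the 64x64x3 images produced by
# read_images above and the default 'valid' padding of tf.layers.conv2d):
#   64x64x3 -> conv1 (5x5, 32 filters) 60x60x32 -> pool 30x30x32
#   -> conv2 (3x3, 64 filters) 28x28x64 -> pool 14x14x64
#   -> flatten 12544 -> dense 1024 -> dense N_CLASSES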

# Because Dropout has a different behavior at training and prediction time, we
# need to create 2 distinct computation graphs that share the same weights.

# Create a graph for training
logits_train = conv_net(X, N_CLASSES, dropout, reuse=False, is_training=True)
# Create another graph for testing that reuses the same weights
logits_test = conv_net(X, N_CLASSES, dropout, reuse=True, is_training=False)

# Define loss and optimizer (with train logits, for dropout to take effect)
loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logits_train, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)

# Evaluate model (with test logits, for dropout to be disabled)
correct_pred = tf.equal(tf.argmax(logits_test, 1), tf.cast(Y, tf.int64))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
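# Note: logits_test is fed from the same input queue X as the training graph,
# so this accuracy is measured on training batches (with dropout disabled),
# not on a held-out test set.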

# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()

# Saver object
saver = tf.train.Saver()

# Start training
with tf.Session() as sess:

    # Run the initializer
    sess.run(init)

    # Start the data queue
    tf.train.start_queue_runners()

    # Training cycle
    for step in range(1, num_steps+1):

        if step % display_step == 0:
            # Run optimization and calculate batch loss and accuracy
            _, loss, acc = sess.run([train_op, loss_op, accuracy])
            print("Step " + str(step) + ", Minibatch Loss= " +
                  "{:.4f}".format(loss) + ", Training Accuracy= " +
                  "{:.3f}".format(acc))
        else:
            # Only run the optimization op (backprop)
            sess.run(train_op)

    print("Optimization Finished!")

    # Save your model
    saver.save(sess, 'my_tf_model')
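
# To reload the trained weights later (a minimal sketch; 'my_tf_model' is the
# checkpoint prefix used by saver.save above):
#
#     with tf.Session() as sess:
#         saver.restore(sess, 'my_tf_model')
#         # ... run logits_test for inference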