cifar10.py

# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the CIFAR-10 network.

Summary of available functions:

 # Compute input images and labels for training. If you would like to run
 # evaluations, use inputs() instead.
 inputs, labels = distorted_inputs()

 # Compute inference on the model inputs to make a prediction.
 predictions = inference(inputs)

 # Compute the total loss of the prediction with respect to the labels.
 loss = loss(predictions, labels)

 # Create a graph to run one step of training with respect to the loss.
 train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import re
import sys
import tarfile

from six.moves import urllib
import tensorflow as tf

import cifar10_input

FLAGS = tf.app.flags.FLAGS

# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 128,
                            """Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data',
                           """Path to the CIFAR-10 data directory.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
                            """Train the model using fp16.""")

# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL

# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999     # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0      # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1  # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1       # Initial learning rate.

# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'

DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'


def _activation_summary(x):
  """Helper to create summaries for activations.

  Creates a summary that provides a histogram of activations.
  Creates a summary that measures the sparsity of activations.

  Args:
    x: Tensor
  Returns:
    nothing
  """
  # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
  # session. This helps the clarity of presentation on tensorboard.
  tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
  tf.summary.histogram(tensor_name + '/activations', x)
  tf.summary.scalar(tensor_name + '/sparsity',
                    tf.nn.zero_fraction(x))


def _variable_on_cpu(name, shape, initializer):
  """Helper to create a Variable stored on CPU memory.

  Args:
    name: name of the variable
    shape: list of ints
    initializer: initializer for Variable

  Returns:
    Variable Tensor
  """
  with tf.device('/cpu:0'):
    dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
  return var


def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
      decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
  var = _variable_on_cpu(
      name,
      shape,
      tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
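    # Note: the weight decay terms accumulated in the 'losses' collection here
    # are summed together with the cross entropy term in loss() below via
    # tf.get_collection('losses').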
  return var


def distorted_inputs():
  """Construct distorted input for CIFAR training using the Reader ops.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.

  Raises:
    ValueError: If no data_dir
  """
  if not FLAGS.data_dir:
    raise ValueError('Please supply a data_dir')
  data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
  images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,
                                                  batch_size=FLAGS.batch_size)
  if FLAGS.use_fp16:
    images = tf.cast(images, tf.float16)
    labels = tf.cast(labels, tf.float16)
  return images, labels


def inputs(eval_data):
  """Construct input for CIFAR evaluation using the Reader ops.

  Args:
    eval_data: bool, indicating if one should use the train or eval data set.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.

  Raises:
    ValueError: If no data_dir
  """
  if not FLAGS.data_dir:
    raise ValueError('Please supply a data_dir')
  data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
  images, labels = cifar10_input.inputs(eval_data=eval_data,
                                        data_dir=data_dir,
                                        batch_size=FLAGS.batch_size)
  if FLAGS.use_fp16:
    images = tf.cast(images, tf.float16)
    labels = tf.cast(labels, tf.float16)
  return images, labels


def inference(images):
  """Build the CIFAR-10 model.

  Args:
    images: Images returned from distorted_inputs() or inputs().

  Returns:
    Logits.
  """
  # We instantiate all variables using tf.get_variable() instead of
  # tf.Variable() in order to share variables across multiple GPU training runs.
  # If we only ran this model on a single GPU, we could simplify this function
  # by replacing all instances of tf.get_variable() with tf.Variable().
  #
  # conv1
  with tf.variable_scope('conv1') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[5, 5, 3, 64],
                                         stddev=5e-2,
                                         wd=0.0)
    conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
    pre_activation = tf.nn.bias_add(conv, biases)
    conv1 = tf.nn.relu(pre_activation, name=scope.name)
    _activation_summary(conv1)

  # pool1
  pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                         padding='SAME', name='pool1')
  # norm1
  norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                    name='norm1')
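  # Local response normalization, as described in the AlexNet paper
  # (Krizhevsky et al., 2012): each activation is divided by a function of the
  # squared activations in neighboring channels, encouraging competition
  # between adjacent feature maps.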

  # conv2
  with tf.variable_scope('conv2') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[5, 5, 64, 64],
                                         stddev=5e-2,
                                         wd=0.0)
    conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
    pre_activation = tf.nn.bias_add(conv, biases)
    conv2 = tf.nn.relu(pre_activation, name=scope.name)
    _activation_summary(conv2)

  # norm2
  norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                    name='norm2')
  # pool2
  pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
                         strides=[1, 2, 2, 1], padding='SAME', name='pool2')

  # local3
  with tf.variable_scope('local3') as scope:
    # Move everything into depth so we can perform a single matrix multiply.
    reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])
    dim = reshape.get_shape()[1].value
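    # The flattened size can be read off the static shape here because the
    # reshape above pins the batch dimension to FLAGS.batch_size, letting
    # TensorFlow infer the remaining dimension at graph-construction time.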
    weights = _variable_with_weight_decay('weights', shape=[dim, 384],
                                          stddev=0.04, wd=0.004)
    biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
    local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
    _activation_summary(local3)

  # local4
  with tf.variable_scope('local4') as scope:
    weights = _variable_with_weight_decay('weights', shape=[384, 192],
                                          stddev=0.04, wd=0.004)
    biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
    local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
    _activation_summary(local4)

  # linear layer (WX + b),
  # We don't apply softmax here because
  # tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
  # and performs the softmax internally for efficiency.
  with tf.variable_scope('softmax_linear') as scope:
    weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
                                          stddev=1/192.0, wd=0.0)
    biases = _variable_on_cpu('biases', [NUM_CLASSES],
                              tf.constant_initializer(0.0))
    softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
    _activation_summary(softmax_linear)

  return softmax_linear


def loss(logits, labels):
  """Add L2Loss to all the trainable variables.

  Add summary for "Loss" and "Loss/avg".

  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
            of shape [batch_size]

  Returns:
    Loss tensor of type float.
  """
  # Calculate the average cross entropy loss across the batch.
  labels = tf.cast(labels, tf.int64)
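  # sparse_softmax_cross_entropy_with_logits expects integer class indices
  # rather than one-hot vectors, hence the cast to int64 above.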
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits, name='cross_entropy_per_example')
  cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
  tf.add_to_collection('losses', cross_entropy_mean)

  # The total loss is defined as the cross entropy loss plus all of the weight
  # decay terms (L2 loss).
  return tf.add_n(tf.get_collection('losses'), name='total_loss')


def _add_loss_summaries(total_loss):
  """Add summaries for losses in CIFAR-10 model.

  Generates moving average for all losses and associated summaries for
  visualizing the performance of the network.

  Args:
    total_loss: Total loss from loss().

  Returns:
    loss_averages_op: op for generating moving averages of losses.
  """
  # Compute the moving average of all individual losses and the total loss.
  loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
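  # ExponentialMovingAverage keeps a shadow copy of each loss updated as
  #   shadow = decay * shadow + (1 - decay) * value,
  # so with decay=0.9 the summary roughly reflects the last ~10 batches.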
  losses = tf.get_collection('losses')
  loss_averages_op = loss_averages.apply(losses + [total_loss])

  # Attach a scalar summary to all individual losses and the total loss; do the
  # same for the averaged version of the losses.
  for l in losses + [total_loss]:
    # Name each loss as '(raw)' and name the moving average version of the loss
    # as the original loss name.
    tf.summary.scalar(l.op.name + ' (raw)', l)
    tf.summary.scalar(l.op.name, loss_averages.average(l))

  return loss_averages_op


def train(total_loss, global_step):
  """Train CIFAR-10 model.

  Create an optimizer and apply to all trainable variables. Add moving
  average for all trainable variables.

  Args:
    total_loss: Total loss from loss().
    global_step: Integer Variable counting the number of training steps
      processed.

  Returns:
    train_op: op for training.
  """
  # Variables that affect learning rate.
  num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
  decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)

  # Decay the learning rate exponentially based on the number of steps.
  lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
                                  global_step,
                                  decay_steps,
                                  LEARNING_RATE_DECAY_FACTOR,
                                  staircase=True)
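  # With staircase=True this evaluates to
  #   INITIAL_LEARNING_RATE * LEARNING_RATE_DECAY_FACTOR ** (global_step // decay_steps),
  # i.e. the rate drops by 10x every decay_steps steps. With the default batch
  # size of 128 and CIFAR-10's 50,000 training images, that is roughly every
  # 137,000 steps.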
  tf.summary.scalar('learning_rate', lr)

  # Generate moving averages of all losses and associated summaries.
  loss_averages_op = _add_loss_summaries(total_loss)

  # Compute gradients.
  with tf.control_dependencies([loss_averages_op]):
    opt = tf.train.GradientDescentOptimizer(lr)
    grads = opt.compute_gradients(total_loss)

  # Apply gradients.
  apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

  # Add histograms for trainable variables.
  for var in tf.trainable_variables():
    tf.summary.histogram(var.op.name, var)

  # Add histograms for gradients.
  for grad, var in grads:
    if grad is not None:
      tf.summary.histogram(var.op.name + '/gradients', grad)

  # Track the moving averages of all trainable variables.
  variable_averages = tf.train.ExponentialMovingAverage(
      MOVING_AVERAGE_DECAY, global_step)
  variables_averages_op = variable_averages.apply(tf.trainable_variables())
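  # The averaged (shadow) copies of the weights are what the tutorial's
  # evaluation script restores at eval time, which typically yields slightly
  # better test accuracy than the raw weights.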
  with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
    train_op = tf.no_op(name='train')

  return train_op


def maybe_download_and_extract():
  """Download and extract the tarball from Alex's website."""
  dest_directory = FLAGS.data_dir
  if not os.path.exists(dest_directory):
    os.makedirs(dest_directory)
  filename = DATA_URL.split('/')[-1]
  filepath = os.path.join(dest_directory, filename)
  if not os.path.exists(filepath):
    def _progress(count, block_size, total_size):
      sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
          float(count * block_size) / float(total_size) * 100.0))
      sys.stdout.flush()
    filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
    print()
    statinfo = os.stat(filepath)
    print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
  extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')
  if not os.path.exists(extracted_dir_path):
    tarfile.open(filepath, 'r:gz').extractall(dest_directory)