cifar10.py

# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
  15. """Builds the CIFAR-10 network.
  16. Summary of available functions:
  17. # Compute input images and labels for training. If you would like to run
  18. # evaluations, use inputs() instead.
  19. inputs, labels = distorted_inputs()
  20. # Compute inference on the model inputs to make a prediction.
  21. predictions = inference(inputs)
  22. # Compute the total loss of the prediction with respect to the labels.
  23. loss = loss(predictions, labels)
  24. # Create a graph to run one step of training with respect to the loss.
  25. train_op = train(loss, global_step)
  26. """
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import gzip
import os
import re
import sys
import tarfile

from six.moves import urllib
import tensorflow as tf

import cifar10_input

FLAGS = tf.app.flags.FLAGS

# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 128,
                            """Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data',
                           """Path to the CIFAR-10 data directory.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
                            """Train the model using fp16.""")

# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL

# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999     # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0      # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1  # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1       # Initial learning rate.
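
# A worked example of the decay schedule above (illustrative; it assumes the
# usual cifar10_input value of 50,000 training examples per epoch): with
# batch_size=128 there are 50000 / 128 = 390.625 batches per epoch, so train()
# below computes decay_steps = int(390.625 * 350) = 136718. The learning rate
# therefore starts at 0.1 and drops by a factor of 10 (to 0.01, then 0.001, ...)
# every 136,718 global steps.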

# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'

DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'


def _activation_summary(x):
  """Helper to create summaries for activations.

  Creates a summary that provides a histogram of activations.
  Creates a summary that measures the sparsity of activations.

  Args:
    x: Tensor
  Returns:
    nothing
  """
  # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
  # session. This helps the clarity of presentation on tensorboard.
  tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
  tf.contrib.deprecated.histogram_summary(tensor_name + '/activations', x)
  tf.contrib.deprecated.scalar_summary(tensor_name + '/sparsity',
                                       tf.nn.zero_fraction(x))


def _variable_on_cpu(name, shape, initializer):
  """Helper to create a Variable stored on CPU memory.

  Args:
    name: name of the variable
    shape: list of ints
    initializer: initializer for Variable

  Returns:
    Variable Tensor
  """
  with tf.device('/cpu:0'):
    dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
  return var


def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
      decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
  var = _variable_on_cpu(
      name,
      shape,
      tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
  if wd is not None:
    weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
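

# For example, the local3 weights created in inference() below use wd=0.004,
# so 0.004 * tf.nn.l2_loss(weights) is added to the 'losses' collection and is
# later summed into the total loss by loss() via tf.add_n().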


def distorted_inputs():
  """Construct distorted input for CIFAR training using the Reader ops.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.

  Raises:
    ValueError: If no data_dir
  """
  if not FLAGS.data_dir:
    raise ValueError('Please supply a data_dir')
  data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
  images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,
                                                  batch_size=FLAGS.batch_size)
  if FLAGS.use_fp16:
    images = tf.cast(images, tf.float16)
    labels = tf.cast(labels, tf.float16)
  return images, labels


def inputs(eval_data):
  """Construct input for CIFAR evaluation using the Reader ops.

  Args:
    eval_data: bool, indicating if one should use the train or eval data set.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.

  Raises:
    ValueError: If no data_dir
  """
  if not FLAGS.data_dir:
    raise ValueError('Please supply a data_dir')
  data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
  images, labels = cifar10_input.inputs(eval_data=eval_data,
                                        data_dir=data_dir,
                                        batch_size=FLAGS.batch_size)
  if FLAGS.use_fp16:
    images = tf.cast(images, tf.float16)
    labels = tf.cast(labels, tf.float16)
  return images, labels


def inference(images):
  """Build the CIFAR-10 model.

  Args:
    images: Images returned from distorted_inputs() or inputs().

  Returns:
    Logits.
  """
  # We instantiate all variables using tf.get_variable() instead of
  # tf.Variable() in order to share variables across multiple GPU training runs.
  # If we only ran this model on a single GPU, we could simplify this function
  # by replacing all instances of tf.get_variable() with tf.Variable().
  #
  # conv1
  with tf.variable_scope('conv1') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[5, 5, 3, 64],
                                         stddev=5e-2,
                                         wd=0.0)
    conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
    pre_activation = tf.nn.bias_add(conv, biases)
    conv1 = tf.nn.relu(pre_activation, name=scope.name)
    _activation_summary(conv1)

  # pool1
  pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                         padding='SAME', name='pool1')
  # norm1
  norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                    name='norm1')

  # conv2
  with tf.variable_scope('conv2') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[5, 5, 64, 64],
                                         stddev=5e-2,
                                         wd=0.0)
    conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
    pre_activation = tf.nn.bias_add(conv, biases)
    conv2 = tf.nn.relu(pre_activation, name=scope.name)
    _activation_summary(conv2)

  # norm2
  norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                    name='norm2')
  # pool2
  pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
                         strides=[1, 2, 2, 1], padding='SAME', name='pool2')

  # local3
  with tf.variable_scope('local3') as scope:
    # Move everything into depth so we can perform a single matrix multiply.
    reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])
    dim = reshape.get_shape()[1].value
    weights = _variable_with_weight_decay('weights', shape=[dim, 384],
                                          stddev=0.04, wd=0.004)
    biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
    local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
    _activation_summary(local3)

  # local4
  with tf.variable_scope('local4') as scope:
    weights = _variable_with_weight_decay('weights', shape=[384, 192],
                                          stddev=0.04, wd=0.004)
    biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
    local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
    _activation_summary(local4)

  # linear layer(WX + b),
  # We don't apply softmax here because
  # tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
  # and performs the softmax internally for efficiency.
  with tf.variable_scope('softmax_linear') as scope:
    weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
                                          stddev=1/192.0, wd=0.0)
    biases = _variable_on_cpu('biases', [NUM_CLASSES],
                              tf.constant_initializer(0.0))
    softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
    _activation_summary(softmax_linear)

  return softmax_linear
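

# For reference, an illustrative shape trace through inference() (assuming the
# cropped IMAGE_SIZE of 24 defined in cifar10_input):
#   images [batch, 24, 24, 3] -> conv1 [batch, 24, 24, 64]
#   -> pool1 [batch, 12, 12, 64] -> conv2/norm2 [batch, 12, 12, 64]
#   -> pool2 [batch, 6, 6, 64] -> reshape [batch, 2304]
#   -> local3 [batch, 384] -> local4 [batch, 192] -> logits [batch, NUM_CLASSES].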


def loss(logits, labels):
  """Add L2Loss to all the trainable variables.

  Add summary for "Loss" and "Loss/avg".

  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
            of shape [batch_size]

  Returns:
    Loss tensor of type float.
  """
  # Calculate the average cross entropy loss across the batch.
  labels = tf.cast(labels, tf.int64)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits, labels, name='cross_entropy_per_example')
  cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
  tf.add_to_collection('losses', cross_entropy_mean)

  # The total loss is defined as the cross entropy loss plus all of the weight
  # decay terms (L2 loss).
  return tf.add_n(tf.get_collection('losses'), name='total_loss')
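

# Note: with the wd values used in inference() (0.0 for the conv and softmax
# layers, 0.004 for local3 and local4), only the two fully connected layers
# actually contribute weight-decay terms to the 'losses' collection summed here.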


def _add_loss_summaries(total_loss):
  """Add summaries for losses in CIFAR-10 model.

  Generates moving average for all losses and associated summaries for
  visualizing the performance of the network.

  Args:
    total_loss: Total loss from loss().

  Returns:
    loss_averages_op: op for generating moving averages of losses.
  """
  # Compute the moving average of all individual losses and the total loss.
  loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
  losses = tf.get_collection('losses')
  loss_averages_op = loss_averages.apply(losses + [total_loss])

  # Attach a scalar summary to all individual losses and the total loss; do the
  # same for the averaged version of the losses.
  for l in losses + [total_loss]:
    # Name each loss as '(raw)' and name the moving average version of the loss
    # as the original loss name.
    tf.contrib.deprecated.scalar_summary(l.op.name + ' (raw)', l)
    tf.contrib.deprecated.scalar_summary(l.op.name, loss_averages.average(l))

  return loss_averages_op


def train(total_loss, global_step):
  """Train CIFAR-10 model.

  Create an optimizer and apply to all trainable variables. Add moving
  average for all trainable variables.

  Args:
    total_loss: Total loss from loss().
    global_step: Integer Variable counting the number of training steps
      processed.

  Returns:
    train_op: op for training.
  """
  # Variables that affect learning rate.
  num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
  decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)

  # Decay the learning rate exponentially based on the number of steps.
  lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
                                  global_step,
                                  decay_steps,
                                  LEARNING_RATE_DECAY_FACTOR,
                                  staircase=True)
  tf.contrib.deprecated.scalar_summary('learning_rate', lr)

  # Generate moving averages of all losses and associated summaries.
  loss_averages_op = _add_loss_summaries(total_loss)

  # Compute gradients.
  with tf.control_dependencies([loss_averages_op]):
    opt = tf.train.GradientDescentOptimizer(lr)
    grads = opt.compute_gradients(total_loss)

  # Apply gradients.
  apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

  # Add histograms for trainable variables.
  for var in tf.trainable_variables():
    tf.contrib.deprecated.histogram_summary(var.op.name, var)

  # Add histograms for gradients.
  for grad, var in grads:
    if grad is not None:
      tf.contrib.deprecated.histogram_summary(var.op.name + '/gradients', grad)

  # Track the moving averages of all trainable variables.
  variable_averages = tf.train.ExponentialMovingAverage(
      MOVING_AVERAGE_DECAY, global_step)
  variables_averages_op = variable_averages.apply(tf.trainable_variables())

  with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
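    # Running this no-op forces its control dependencies (the gradient update
    # and the variable moving-average update) to run first, so evaluating
    # train_op performs one complete training step.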
    train_op = tf.no_op(name='train')

  return train_op


def maybe_download_and_extract():
  """Download and extract the tarball from Alex's website."""
  dest_directory = FLAGS.data_dir
  if not os.path.exists(dest_directory):
    os.makedirs(dest_directory)
  filename = DATA_URL.split('/')[-1]
  filepath = os.path.join(dest_directory, filename)
  if not os.path.exists(filepath):
    def _progress(count, block_size, total_size):
      sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
          float(count * block_size) / float(total_size) * 100.0))
      sys.stdout.flush()
    filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
    print()
    statinfo = os.stat(filepath)
    print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
  tarfile.open(filepath, 'r:gz').extractall(dest_directory)
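

# Illustrative usage only: a minimal sketch of how the functions above compose
# into a training graph, following the call sequence in the module docstring.
# The official entry points for this tutorial are cifar10_train.py and
# cifar10_eval.py; the plain-Session loop below (queue runners, a handful of
# steps) is an assumption for demonstration, not part of the original file.
if __name__ == '__main__':
  maybe_download_and_extract()
  with tf.Graph().as_default():
    global_step = tf.Variable(0, trainable=False, name='global_step')
    images, labels = distorted_inputs()        # training input pipeline
    logits = inference(images)                 # forward pass
    total_loss = loss(logits, labels)          # cross entropy + weight decay
    train_op = train(total_loss, global_step)  # one SGD step + EMA updates

    with tf.Session() as sess:
      sess.run(tf.global_variables_initializer())
      # The input pipeline is fed by queue runners in this TensorFlow version.
      coord = tf.train.Coordinator()
      threads = tf.train.start_queue_runners(sess=sess, coord=coord)
      for step in range(10):  # a few steps, just to show the graph runs
        _, loss_value = sess.run([train_op, total_loss])
        print('step %d, loss = %.2f' % (step, loss_value))
      coord.request_stop()
      coord.join(threads)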