ops.py

# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains convenience wrappers for typical Neural Network TensorFlow layers.

Additionally it maintains a collection with update_ops that need to be
updated after the ops have been computed, for example to update moving means
and moving variances of batch_norm.

Ops that have different behavior during training or eval have an is_training
parameter. Additionally, ops that contain variables.variable have a trainable
parameter, which controls whether the ops' variables are trainable or not.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from tensorflow.python.training import moving_averages

from inception.slim import losses
from inception.slim import scopes
from inception.slim import variables

# Used to keep the update ops done by batch_norm.
UPDATE_OPS_COLLECTION = '_update_ops_'
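# Usage sketch (not part of the original file): during training, the update
# ops collected in UPDATE_OPS_COLLECTION are typically grouped with the
# training step so the batch_norm moving averages are refreshed every step.
# `train_step` below is a placeholder for whatever optimizer op the caller
# builds:
#
#   batchnorm_updates = tf.get_collection(UPDATE_OPS_COLLECTION)
#   train_op = tf.group(train_step, *batchnorm_updates)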


@scopes.add_arg_scope
def batch_norm(inputs,
               decay=0.999,
               scale=False,
               epsilon=0.001,
               moving_vars='moving_vars',
               activation=None,
               is_training=True,
               trainable=True,
               restore=True,
               scope=None):
  """Adds a Batch Normalization layer.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels]
      or [batch_size, channels].
    decay: decay for the moving average.
    scale: If True, multiply by gamma. If False, gamma is
      not used. When the next layer is linear (also e.g. ReLU), this can be
      disabled since the scaling can be done by the next layer.
    epsilon: small float added to variance to avoid dividing by zero.
    moving_vars: collection to store the moving_mean and moving_variance.
    activation: activation function.
    is_training: whether or not the model is in training mode.
    trainable: whether or not the variables should be trainable.
    restore: whether or not the variables should be marked for restore.
    scope: Optional scope for variable_op_scope.

  Returns:
    a tensor representing the output of the operation.
  """
  inputs_shape = inputs.get_shape()
  with tf.variable_op_scope([inputs], scope, 'BatchNorm'):
    axis = range(len(inputs_shape) - 1)
    params_shape = inputs_shape[-1:]
    with scopes.arg_scope([variables.variable], restore=restore):
      # Allocate parameters for the beta and gamma of the normalization.
      beta = variables.variable('beta',
                                params_shape,
                                initializer=tf.zeros_initializer,
                                trainable=trainable)
      if scale:
        gamma = variables.variable('gamma',
                                   params_shape,
                                   initializer=tf.ones,
                                   trainable=trainable)
      else:
        gamma = None
      # Create moving_mean and moving_variance, and add them to moving_vars
      # and GraphKeys.MOVING_AVERAGE_VARIABLES collections.
      with scopes.arg_scope([variables.variable], trainable=False,
                            collections=[
                                moving_vars,
                                tf.GraphKeys.MOVING_AVERAGE_VARIABLES]):
        moving_mean = variables.variable('moving_mean',
                                         params_shape,
                                         initializer=tf.zeros_initializer)
        moving_variance = variables.variable('moving_variance',
                                             params_shape,
                                             initializer=tf.ones)
    if is_training:
      # Calculate the moments based on the individual batch.
      mean, variance = tf.nn.moments(inputs, axis)
      update_moving_mean = moving_averages.assign_moving_average(
          moving_mean, mean, decay)
      tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
      update_moving_variance = moving_averages.assign_moving_average(
          moving_variance, variance, decay)
      tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)
    else:
      # Just use the moving_mean and moving_variance.
      mean = moving_mean
      variance = moving_variance
    # Normalize the activations.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, beta, gamma, epsilon)
    outputs.set_shape(inputs.get_shape())
    if activation:
      outputs = activation(outputs)
    return outputs
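
# Usage sketch (not part of the original file): batch_norm is usually reached
# through conv2d/fc via `batch_norm_params` (see conv2d below), but it can
# also be applied directly, assuming a 4-D `net` tensor is in scope:
#
#   net = batch_norm(net, is_training=False, scope='bn')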


@scopes.add_arg_scope
def conv2d(inputs,
           num_filters_out,
           kernel_size,
           stride=1,
           padding='SAME',
           activation=tf.nn.relu,
           stddev=0.01,
           bias=0.0,
           weight_decay=0,
           batch_norm_params=None,
           is_training=True,
           trainable=True,
           restore=True,
           scope=None):
  """Adds a 2D convolution followed by an optional batch_norm layer.

  conv2d creates a variable called 'weights', representing the convolutional
  kernel, that is convolved with the input. If `batch_norm_params` is None, a
  second variable called 'biases' is added to the result of the convolution
  operation.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_filters_out: the number of output filters.
    kernel_size: a 2-D list comprising the height and width of the filters.
    stride: the stride in height and width of the convolution.
    padding: one of 'VALID' or 'SAME'.
    activation: activation function.
    stddev: standard deviation of the truncated gaussian weight distribution.
    bias: the initial value of the biases.
    weight_decay: the weight decay.
    batch_norm_params: parameters for the batch_norm. If None, batch_norm is
      not used.
    is_training: whether or not the model is in training mode.
    trainable: whether or not the variables should be trainable.
    restore: whether or not the variables should be marked for restore.
    scope: Optional scope for variable_op_scope.

  Returns:
    a tensor representing the output of the operation.

  Raises:
    ValueError: if 'kernel_size' is not a 2-D list.
  """
  if len(kernel_size) != 2:
    raise ValueError('kernel_size must be a 2-D list.')
  with tf.variable_op_scope([inputs], scope, 'Conv'):
    num_filters_in = inputs.get_shape()[-1]
    weights_shape = [kernel_size[0], kernel_size[1],
                     num_filters_in, num_filters_out]
    weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
    l2_regularizer = lambda t: losses.l2_loss(t, weight_decay)
    weights = variables.variable('weights',
                                 shape=weights_shape,
                                 initializer=weights_initializer,
                                 regularizer=l2_regularizer,
                                 trainable=trainable,
                                 restore=restore)
    conv = tf.nn.conv2d(inputs, weights, [1, stride, stride, 1],
                        padding=padding)
    if batch_norm_params is not None:
      with scopes.arg_scope([batch_norm], is_training=is_training,
                            trainable=trainable, restore=restore):
        outputs = batch_norm(conv, **batch_norm_params)
    else:
      bias_shape = [num_filters_out,]
      bias_initializer = tf.constant_initializer(bias)
      biases = variables.variable('biases',
                                  shape=bias_shape,
                                  initializer=bias_initializer,
                                  trainable=trainable,
                                  restore=restore)
      outputs = tf.nn.bias_add(conv, biases)
    if activation:
      outputs = activation(outputs)
    return outputs
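
# Usage sketch (assumption, not part of the original file): a 4-D `images`
# tensor is assumed to be in scope; batch_norm is enabled for every conv2d
# via arg_scope, so each convolution is followed by batch normalization:
#
#   with scopes.arg_scope([conv2d], batch_norm_params={'decay': 0.99}):
#     net = conv2d(images, 64, [3, 3], scope='conv1')
#     net = conv2d(net, 128, [3, 3], stride=2, scope='conv2')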


@scopes.add_arg_scope
def fc(inputs,
       num_units_out,
       activation=tf.nn.relu,
       stddev=0.01,
       bias=0.0,
       weight_decay=0,
       batch_norm_params=None,
       is_training=True,
       trainable=True,
       restore=True,
       scope=None):
  """Adds a fully connected layer followed by an optional batch_norm layer.

  FC creates a variable called 'weights', representing the fully connected
  weight matrix, that is multiplied by the input. If `batch_norm_params` is
  None, a second variable called 'biases' is added to the result of the
  initial vector-matrix multiplication.

  Args:
    inputs: a [B x N] tensor where B is the batch size and N is the number of
      input units in the layer.
    num_units_out: the number of output units in the layer.
    activation: activation function.
    stddev: the standard deviation for the weights.
    bias: the initial value of the biases.
    weight_decay: the weight decay.
    batch_norm_params: parameters for the batch_norm. If None, batch_norm is
      not used.
    is_training: whether or not the model is in training mode.
    trainable: whether or not the variables should be trainable.
    restore: whether or not the variables should be marked for restore.
    scope: Optional scope for variable_op_scope.

  Returns:
    the tensor variable representing the result of the series of operations.
  """
  with tf.variable_op_scope([inputs], scope, 'FC'):
    num_units_in = inputs.get_shape()[1]
    weights_shape = [num_units_in, num_units_out]
    weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
    l2_regularizer = lambda t: losses.l2_loss(t, weight_decay)
    weights = variables.variable('weights',
                                 shape=weights_shape,
                                 initializer=weights_initializer,
                                 regularizer=l2_regularizer,
                                 trainable=trainable,
                                 restore=restore)
    if batch_norm_params is not None:
      outputs = tf.matmul(inputs, weights)
      with scopes.arg_scope([batch_norm], is_training=is_training,
                            trainable=trainable, restore=restore):
        outputs = batch_norm(outputs, **batch_norm_params)
    else:
      bias_shape = [num_units_out,]
      bias_initializer = tf.constant_initializer(bias)
      biases = variables.variable('biases',
                                  shape=bias_shape,
                                  initializer=bias_initializer,
                                  trainable=trainable,
                                  restore=restore)
      outputs = tf.nn.xw_plus_b(inputs, weights, biases)
    if activation:
      outputs = activation(outputs)
    return outputs
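
# Usage sketch (assumption, not part of the original file): a two-layer
# classifier head over a flattened [batch_size, N] `net` tensor:
#
#   net = fc(net, 384, scope='fc1')
#   logits = fc(net, 10, activation=None, scope='logits')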


def one_hot_encoding(labels, num_classes, scope=None):
  """Transform numeric labels into onehot_labels.

  Args:
    labels: [batch_size] target labels.
    num_classes: total number of classes.
    scope: Optional scope for op_scope.

  Returns:
    one hot encoding of the labels.
  """
  with tf.op_scope([labels], scope, 'OneHotEncoding'):
    batch_size = labels.get_shape()[0]
    indices = tf.expand_dims(tf.range(0, batch_size), 1)
    labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
    concated = tf.concat(1, [indices, labels])
    onehot_labels = tf.sparse_to_dense(
        concated, tf.pack([batch_size, num_classes]), 1.0, 0.0)
    onehot_labels.set_shape([batch_size, num_classes])
    return onehot_labels
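
# Example sketch (not part of the original file): with num_classes=3, the
# labels [0, 2] are encoded row by row, giving [[1., 0., 0.], [0., 0., 1.]]:
#
#   onehot = one_hot_encoding(tf.constant([0, 2]), num_classes=3)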


@scopes.add_arg_scope
def max_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None):
  """Adds a Max Pooling layer.

  It is assumed by the wrapper that the pooling is only done per image and not
  in depth or batch.

  Args:
    inputs: a tensor of size [batch_size, height, width, depth].
    kernel_size: the size of the pooling kernel over which the op is computed.
    stride: the stride in height and width of the pooling.
    padding: the padding method, either 'VALID' or 'SAME'.
    scope: Optional scope for op_scope.

  Returns:
    a tensor representing the results of the pooling operation.

  Raises:
    ValueError: if 'kernel_size' is not a 2-D list.
  """
  if len(kernel_size) != 2:
    raise ValueError('kernel_size must be a 2-D list.')
  with tf.op_scope([inputs], scope, 'MaxPool'):
    return tf.nn.max_pool(inputs,
                          ksize=[1, kernel_size[0], kernel_size[1], 1],
                          strides=[1, stride, stride, 1],
                          padding=padding)


@scopes.add_arg_scope
def avg_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None):
  """Adds an Avg Pooling layer.

  It is assumed by the wrapper that the pooling is only done per image and not
  in depth or batch.

  Args:
    inputs: a tensor of size [batch_size, height, width, depth].
    kernel_size: the size of the pooling kernel over which the op is computed.
    stride: the stride in height and width of the pooling.
    padding: the padding method, either 'VALID' or 'SAME'.
    scope: Optional scope for op_scope.

  Returns:
    a tensor representing the results of the pooling operation.

  Raises:
    ValueError: if 'kernel_size' is not a 2-D list.
  """
  if len(kernel_size) != 2:
    raise ValueError('kernel_size must be a 2-D list.')
  with tf.op_scope([inputs], scope, 'AvgPool'):
    return tf.nn.avg_pool(inputs,
                          ksize=[1, kernel_size[0], kernel_size[1], 1],
                          strides=[1, stride, stride, 1],
                          padding=padding)
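
# Usage sketch (not part of the original file), assuming a 4-D `net` tensor
# is in scope:
#
#   net = max_pool(net, [3, 3], stride=2, scope='pool1')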


@scopes.add_arg_scope
def dropout(inputs, keep_prob=0.5, is_training=True, scope=None):
  """Returns a dropout layer applied to the input.

  Args:
    inputs: the tensor to pass to the Dropout layer.
    keep_prob: the probability of keeping each input unit.
    is_training: whether or not the model is in training mode. If so, dropout
      is applied and values scaled. Otherwise, inputs is returned.
    scope: Optional scope for op_scope.

  Returns:
    a tensor representing the output of the operation.
  """
  if is_training and keep_prob > 0:
    with tf.op_scope([inputs], scope, 'Dropout'):
      return tf.nn.dropout(inputs, keep_prob)
  else:
    return inputs
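
# Usage sketch (not part of the original file): dropout is a no-op outside
# training, so the same graph-construction code can be shared between train
# and eval, assuming `net` and a Python bool `is_training` are in scope:
#
#   net = dropout(net, keep_prob=0.8, is_training=is_training, scope='drop')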


def flatten(inputs, scope=None):
  """Flattens the input while maintaining the batch_size.

  Assumes that the first dimension represents the batch.

  Args:
    inputs: a tensor of size [batch_size, ...].
    scope: Optional scope for op_scope.

  Returns:
    a flattened tensor with shape [batch_size, k].

  Raises:
    ValueError: if inputs.shape is wrong.
  """
  if len(inputs.get_shape()) < 2:
    raise ValueError('Inputs must have at least 2 dimensions.')
  dims = inputs.get_shape()[1:]
  k = dims.num_elements()
  with tf.op_scope([inputs], scope, 'Flatten'):
    return tf.reshape(inputs, [-1, k])
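
# Usage sketch (assumption, not part of the original file): flattening
# convolutional features before the classifier; `net` is assumed to have
# shape [batch_size, 7, 7, 64], so the flattened shape is [batch_size, 3136]:
#
#   net = flatten(net, scope='flatten')
#   logits = fc(net, num_units_out=10, activation=None, scope='logits')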


def repeat_op(repetitions, inputs, op, *args, **kwargs):
  """Build a sequential Tower starting from inputs by using an op repeatedly.

  It creates new scopes for each operation by increasing the counter.
  Example: given repeat_op(3, _, ops.conv2d, 64, [3, 3], scope='conv1')
  it will repeat the given op under the following variable_scopes:
    conv1/Conv
    conv1/Conv_1
    conv1/Conv_2

  Args:
    repetitions: number of repetitions.
    inputs: a tensor of size [batch_size, height, width, channels].
    op: an operation.
    *args: args for the op.
    **kwargs: kwargs for the op.

  Returns:
    a tensor resulting from applying the operation op, repetitions times.

  Raises:
    ValueError: if the op is unknown or wrong.
  """
  scope = kwargs.pop('scope', None)
  with tf.variable_op_scope([inputs], scope, 'RepeatOp'):
    tower = inputs
    for _ in range(repetitions):
      tower = op(tower, *args, **kwargs)
    return tower
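
# Usage sketch (assumption, not part of the original file): building the three
# stacked convolutions described in the docstring above, assuming a 4-D `net`
# tensor is in scope:
#
#   net = repeat_op(3, net, conv2d, 64, [3, 3], scope='conv1')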