# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for inception v2 classification network."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

slim = tf.contrib.slim
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)


def inception_v2_base(inputs,
                      final_endpoint='Mixed_5c',
                      min_depth=16,
                      depth_multiplier=1.0,
                      scope=None):
  """Inception v2 (6a2).

  Constructs an Inception v2 network from inputs to the given final endpoint.
  This method can construct the network up to the layer inception(5b) as
  described in http://arxiv.org/abs/1502.03167.

  Args:
    inputs: a tensor of shape [batch_size, height, width, channels].
    final_endpoint: specifies the endpoint to construct the network up to. It
      can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
      'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'Mixed_4a',
      'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_5a', 'Mixed_5b',
      'Mixed_5c'].
    min_depth: Minimum depth value (number of channels) for all convolution
      ops. Enforced when depth_multiplier < 1, and not an active constraint
      when depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or computation cost of the model.
    scope: Optional variable_scope.

  Returns:
    tensor_out: output tensor corresponding to the final_endpoint.
    end_points: a dictionary of activations for external use, for example
      summaries or losses.

  Raises:
    ValueError: if final_endpoint is not set to one of the predefined values,
      or if depth_multiplier <= 0.
  """
  # end_points will collect relevant activations for external use, for example
  # summaries or losses.
  end_points = {}

  # Used to find thinned depths for each layer.
  if depth_multiplier <= 0:
    raise ValueError('depth_multiplier is not greater than zero.')
  depth = lambda d: max(int(d * depth_multiplier), min_depth)
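  # Illustrative note: with depth_multiplier=0.5 and min_depth=16,
  # depth(64) == 32, while depth(16) == 16 because the min_depth floor
  # takes effect.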

  with tf.variable_scope(scope, 'InceptionV2', [inputs]):
    with slim.arg_scope(
        [slim.conv2d, slim.max_pool2d, slim.avg_pool2d, slim.separable_conv2d],
        stride=1, padding='SAME'):

      # Note that sizes in the comments below assume an input spatial size of
      # 224x224; however, the inputs can be of any size greater than 32x32.

      # 224 x 224 x 3
      end_point = 'Conv2d_1a_7x7'
      # depthwise_multiplier here is different from depth_multiplier.
      # depthwise_multiplier determines the output channels of the initial
      # depthwise conv (see docs for tf.nn.separable_conv2d), while
      # depth_multiplier controls the # channels of the subsequent 1x1
      # convolution. Must have
      #   in_channels * depthwise_multiplier <= out_channels
      # so that the separable convolution is not overparameterized.
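      # Illustrative note: with the default depth_multiplier, depth(64) == 64,
      # so depthwise_multiplier = min(int(64 / 3), 8) = 8, and the constraint
      # holds for RGB inputs since 3 * 8 = 24 <= 64.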
      depthwise_multiplier = min(int(depth(64) / 3), 8)
      net = slim.separable_conv2d(
          inputs, depth(64), [7, 7], depth_multiplier=depthwise_multiplier,
          stride=2, weights_initializer=trunc_normal(1.0),
          scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 112 x 112 x 64
      end_point = 'MaxPool_2a_3x3'
      net = slim.max_pool2d(net, [3, 3], scope=end_point, stride=2)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 56 x 56 x 64
      end_point = 'Conv2d_2b_1x1'
      net = slim.conv2d(net, depth(64), [1, 1], scope=end_point,
                        weights_initializer=trunc_normal(0.1))
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 56 x 56 x 64
      end_point = 'Conv2d_2c_3x3'
      net = slim.conv2d(net, depth(192), [3, 3], scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 56 x 56 x 192
      end_point = 'MaxPool_3a_3x3'
      net = slim.max_pool2d(net, [3, 3], scope=end_point, stride=2)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 28 x 28 x 192

      # Inception module.
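      # Each Mixed_* block below concatenates parallel branches (1x1 convs,
      # 1x1 -> 3x3 towers, and a pooled branch) along the channel axis; the
      # stride-2 blocks (Mixed_4a, Mixed_5a) also halve spatial resolution.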
      end_point = 'Mixed_3b'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(64), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(64), [3, 3],
                                 scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(
              net, depth(64), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(32), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 28 x 28 x 256
      end_point = 'Mixed_3c'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(64), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(96), [3, 3],
                                 scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(
              net, depth(64), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(64), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 28 x 28 x 320
      end_point = 'Mixed_4a'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(
              net, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_0 = slim.conv2d(branch_0, depth(160), [3, 3], stride=2,
                                 scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(64), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(
              branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
          branch_1 = slim.conv2d(
              branch_1, depth(96), [3, 3], stride=2, scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.max_pool2d(
              net, [3, 3], stride=2, scope='MaxPool_1a_3x3')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 14 x 14 x 576
      end_point = 'Mixed_4b'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(224), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(64), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(
              branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(
              net, depth(96), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(128), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(128), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 14 x 14 x 576
      end_point = 'Mixed_4c'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(96), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(128), [3, 3],
                                 scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(
              net, depth(96), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(128), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(128), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 14 x 14 x 576
      end_point = 'Mixed_4d'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(160), [3, 3],
                                 scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(
              net, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(160), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(160), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(96), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 14 x 14 x 576
      end_point = 'Mixed_4e'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(96), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(192), [3, 3],
                                 scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(
              net, depth(160), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(192), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(192), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(96), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 14 x 14 x 576
      end_point = 'Mixed_5a'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(
              net, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_0 = slim.conv2d(branch_0, depth(192), [3, 3], stride=2,
                                 scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(192), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(256), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], stride=2,
                                 scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.max_pool2d(net, [3, 3], stride=2,
                                     scope='MaxPool_1a_3x3')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 7 x 7 x 1024
      end_point = 'Mixed_5b'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(192), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(320), [3, 3],
                                 scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(
              net, depth(160), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 7 x 7 x 1024
      end_point = 'Mixed_5c'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(192), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(320), [3, 3],
                                 scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(
              net, depth(192), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
  raise ValueError('Unknown final endpoint %s' % final_endpoint)
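
# Example usage of inception_v2_base (an illustrative sketch; assumes TF 1.x
# graph mode and an input placeholder named `images`):
#
#   images = tf.placeholder(tf.float32, [None, 224, 224, 3])
#   net, end_points = inception_v2_base(images, final_endpoint='Mixed_4e')
#   # `net` is the 14 x 14 x 576 Mixed_4e activation; `end_points` maps every
#   # constructed endpoint name to its tensor.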


def inception_v2(inputs,
                 num_classes=1000,
                 is_training=True,
                 dropout_keep_prob=0.8,
                 min_depth=16,
                 depth_multiplier=1.0,
                 prediction_fn=slim.softmax,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='InceptionV2'):
  """Inception v2 model for classification.

  Constructs an Inception v2 network for classification as described in
  http://arxiv.org/abs/1502.03167.

  The default image size used to train this network is 224x224.

  Args:
    inputs: a tensor of shape [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether the model is being trained or not.
    dropout_keep_prob: the fraction of activation values that are retained.
    min_depth: Minimum depth value (number of channels) for all convolution
      ops. Enforced when depth_multiplier < 1, and not an active constraint
      when depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or computation cost of the model.
    prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape [B, C]; if False, logits is
      of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
    reuse: whether or not the network and its variables should be reused. To
      be able to reuse, 'scope' must be given.
    scope: Optional variable_scope.

  Returns:
    logits: the pre-softmax activations, a tensor of size
      [batch_size, num_classes].
    end_points: a dictionary from components of the network to the
      corresponding activation.

  Raises:
    ValueError: if depth_multiplier <= 0.
  """
  if depth_multiplier <= 0:
    raise ValueError('depth_multiplier is not greater than zero.')

  # Final pooling and prediction.
  with tf.variable_scope(scope, 'InceptionV2', [inputs, num_classes],
                         reuse=reuse) as scope:
    with slim.arg_scope([slim.batch_norm, slim.dropout],
                        is_training=is_training):
      net, end_points = inception_v2_base(
          inputs, scope=scope, min_depth=min_depth,
          depth_multiplier=depth_multiplier)
      with tf.variable_scope('Logits'):
        kernel_size = _reduced_kernel_size_for_small_input(net, [7, 7])
        net = slim.avg_pool2d(net, kernel_size, padding='VALID',
                              scope='AvgPool_1a_{}x{}'.format(*kernel_size))
        # 1 x 1 x 1024
        net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
        logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                             normalizer_fn=None, scope='Conv2d_1c_1x1')
        if spatial_squeeze:
          logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
      end_points['Logits'] = logits
      end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
  return logits, end_points


inception_v2.default_image_size = 224
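
# Example usage of inception_v2 (an illustrative sketch; assumes TF 1.x graph
# mode and an input placeholder named `images`):
#
#   images = tf.placeholder(tf.float32, [None, 224, 224, 3])
#   logits, end_points = inception_v2(images, num_classes=1000,
#                                     is_training=False)
#   # end_points['Predictions'] holds the softmax class probabilities.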


def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
  """Define kernel size which is automatically reduced for small input.

  If the shape of the input images is unknown at graph construction time this
  function assumes that the input images are large enough.

  Args:
    input_tensor: input tensor of size [batch_size, height, width, channels].
    kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]

  Returns:
    a list with the kernel size to use.

  TODO(jrru): Make this function work with unknown shapes. Theoretically, this
  can be done with the code below. Problems are two-fold: (1) If the shape was
  known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
  handle tensors that define the kernel size.
      shape = tf.shape(input_tensor)
      return tf.stack([tf.minimum(shape[1], kernel_size[0]),
                       tf.minimum(shape[2], kernel_size[1])])
  """
  shape = input_tensor.get_shape().as_list()
  if shape[1] is None or shape[2] is None:
    kernel_size_out = kernel_size
  else:
    kernel_size_out = [min(shape[1], kernel_size[0]),
                       min(shape[2], kernel_size[1])]
  return kernel_size_out


def inception_v2_arg_scope(weight_decay=0.00004):
  """Defines the default InceptionV2 arg scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.

  Returns:
    An `arg_scope` to use for the inception v2 model.
  """
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': 0.9997,
      # epsilon to prevent 0s in variance.
      'epsilon': 0.001,
      # collection containing update_ops.
      'updates_collections': tf.GraphKeys.UPDATE_OPS,
  }

  # Set weight_decay for weights in Conv and FC layers.
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      weights_regularizer=slim.l2_regularizer(weight_decay)):
    with slim.arg_scope(
        [slim.conv2d],
        weights_initializer=slim.variance_scaling_initializer(),
        activation_fn=tf.nn.relu,
        normalizer_fn=slim.batch_norm,
        normalizer_params=batch_norm_params) as sc:
      return sc
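
# Example usage of inception_v2_arg_scope (an illustrative sketch): wrap model
# construction in the arg scope so every conv layer picks up the weight decay,
# initializer, and batch norm defaults defined above:
#
#   with slim.arg_scope(inception_v2_arg_scope(weight_decay=0.00004)):
#     logits, end_points = inception_v2(images, num_classes=1000,
#                                       is_training=True)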