# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
  15. """Contains the definition for inception v3 classification network."""
  16. from __future__ import absolute_import
  17. from __future__ import division
  18. from __future__ import print_function
  19. import tensorflow as tf
  20. from nets import inception_utils
  21. slim = tf.contrib.slim
  22. trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
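
# trunc_normal(stddev) builds a truncated-normal weight initializer with mean
# 0.0; the auxiliary head below uses trunc_normal(0.01) and trunc_normal(0.001)
# for its convolution weights.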


def inception_v3_base(inputs,
                      final_endpoint='Mixed_7c',
                      min_depth=16,
                      depth_multiplier=1.0,
                      scope=None):
  """Inception model from http://arxiv.org/abs/1512.00567.

  Constructs an Inception v3 network from inputs to the given final endpoint.
  This method can construct the network up to the final inception block
  Mixed_7c.

  Note that the names of the layers in the paper do not correspond to the names
  of the endpoints registered by this function, although they build the same
  network.

  Here is a mapping from the old names to the new names:
  Old name          | New name
  =======================================
  conv0             | Conv2d_1a_3x3
  conv1             | Conv2d_2a_3x3
  conv2             | Conv2d_2b_3x3
  pool1             | MaxPool_3a_3x3
  conv3             | Conv2d_3b_1x1
  conv4             | Conv2d_4a_3x3
  pool2             | MaxPool_5a_3x3
  mixed_35x35x256a  | Mixed_5b
  mixed_35x35x288a  | Mixed_5c
  mixed_35x35x288b  | Mixed_5d
  mixed_17x17x768a  | Mixed_6a
  mixed_17x17x768b  | Mixed_6b
  mixed_17x17x768c  | Mixed_6c
  mixed_17x17x768d  | Mixed_6d
  mixed_17x17x768e  | Mixed_6e
  mixed_8x8x1280a   | Mixed_7a
  mixed_8x8x2048a   | Mixed_7b
  mixed_8x8x2048b   | Mixed_7c

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    final_endpoint: specifies the endpoint to construct the network up to. It
      can be one of ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
      'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3',
      'Mixed_5b', 'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c',
      'Mixed_6d', 'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c'].
    min_depth: Minimum depth value (number of channels) for all convolution
      ops. Enforced when depth_multiplier < 1, and not an active constraint
      when depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or the computation cost of the model.
    scope: Optional variable_scope.

  Returns:
    tensor_out: output tensor corresponding to the final_endpoint.
    end_points: a dictionary of activations for external use, for example
      summaries or losses.

  Raises:
    ValueError: if final_endpoint is not set to one of the predefined values,
      or if depth_multiplier <= 0.
  """
  # end_points will collect relevant activations for external use, for example
  # summaries or losses.
  end_points = {}

  if depth_multiplier <= 0:
    raise ValueError('depth_multiplier is not greater than zero.')
  depth = lambda d: max(int(d * depth_multiplier), min_depth)
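  # For example, with depth_multiplier=0.5 and the default min_depth=16,
  # depth(32) == 16 and depth(192) == 96: every layer is halved in width but
  # never drops below 16 channels.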

  with tf.variable_scope(scope, 'InceptionV3', [inputs]):
    with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                        stride=1, padding='VALID'):
      # 299 x 299 x 3
      end_point = 'Conv2d_1a_3x3'
      net = slim.conv2d(inputs, depth(32), [3, 3], stride=2, scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 149 x 149 x 32
      end_point = 'Conv2d_2a_3x3'
      net = slim.conv2d(net, depth(32), [3, 3], scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 147 x 147 x 32
      end_point = 'Conv2d_2b_3x3'
      net = slim.conv2d(net, depth(64), [3, 3], padding='SAME',
                        scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 147 x 147 x 64
      end_point = 'MaxPool_3a_3x3'
      net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 73 x 73 x 64
      end_point = 'Conv2d_3b_1x1'
      net = slim.conv2d(net, depth(80), [1, 1], scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 73 x 73 x 80.
      end_point = 'Conv2d_4a_3x3'
      net = slim.conv2d(net, depth(192), [3, 3], scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 71 x 71 x 192.
      end_point = 'MaxPool_5a_3x3'
      net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 35 x 35 x 192.

    # Inception blocks
    with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                        stride=1, padding='SAME'):
      # mixed: 35 x 35 x 256.
      end_point = 'Mixed_5b'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],
                                 scope='Conv2d_0b_5x5')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(branch_3, depth(32), [1, 1],
                                 scope='Conv2d_0b_1x1')
        net = tf.concat(
            axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # mixed_1: 35 x 35 x 288.
      end_point = 'Mixed_5c'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          # Note: the two scope names below break the naming pattern of the
          # other 35 x 35 blocks; they are kept as-is because renaming them
          # would change variable names and invalidate existing checkpoints.
          branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0b_1x1')
          branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],
                                 scope='Conv_1_0c_5x5')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(branch_3, depth(64), [1, 1],
                                 scope='Conv2d_0b_1x1')
        net = tf.concat(
            axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # mixed_2: 35 x 35 x 288.
      end_point = 'Mixed_5d'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],
                                 scope='Conv2d_0b_5x5')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(branch_3, depth(64), [1, 1],
                                 scope='Conv2d_0b_1x1')
        net = tf.concat(
            axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
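
      # Mixed_6a shrinks the grid from 35 x 35 to 17 x 17 by running stride-2
      # convolution branches in parallel with a stride-2 max pool and
      # concatenating the results, the grid-reduction scheme from the paper.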
      # mixed_3: 17 x 17 x 768.
      end_point = 'Mixed_6a'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(384), [3, 3], stride=2,
                                 padding='VALID', scope='Conv2d_1a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(96), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], stride=2,
                                 padding='VALID', scope='Conv2d_1a_1x1')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
                                     scope='MaxPool_1a_3x3')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
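
      # Mixed_6b through Mixed_6e factorize 7 x 7 convolutions into stacked
      # 1 x 7 and 7 x 1 convolutions, which reach the same receptive field
      # with far fewer parameters (the asymmetric factorization of the paper).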
      # mixed_4: 17 x 17 x 768.
      end_point = 'Mixed_6b'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(192), [1, 1],
                                 scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(128), [1, 1],
                                 scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(128), [1, 7],
                                 scope='Conv2d_0b_1x7')
          branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
                                 scope='Conv2d_0c_7x1')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(128), [1, 1],
                                 scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(128), [7, 1],
                                 scope='Conv2d_0b_7x1')
          branch_2 = slim.conv2d(branch_2, depth(128), [1, 7],
                                 scope='Conv2d_0c_1x7')
          branch_2 = slim.conv2d(branch_2, depth(128), [7, 1],
                                 scope='Conv2d_0d_7x1')
          branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
                                 scope='Conv2d_0e_1x7')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
                                 scope='Conv2d_0b_1x1')
        net = tf.concat(
            axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # mixed_5: 17 x 17 x 768.
      end_point = 'Mixed_6c'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(192), [1, 1],
                                 scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(160), [1, 1],
                                 scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(160), [1, 7],
                                 scope='Conv2d_0b_1x7')
          branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
                                 scope='Conv2d_0c_7x1')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(160), [1, 1],
                                 scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
                                 scope='Conv2d_0b_7x1')
          branch_2 = slim.conv2d(branch_2, depth(160), [1, 7],
                                 scope='Conv2d_0c_1x7')
          branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
                                 scope='Conv2d_0d_7x1')
          branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
                                 scope='Conv2d_0e_1x7')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
                                 scope='Conv2d_0b_1x1')
        net = tf.concat(
            axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # mixed_6: 17 x 17 x 768.
      end_point = 'Mixed_6d'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(192), [1, 1],
                                 scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(160), [1, 1],
                                 scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(160), [1, 7],
                                 scope='Conv2d_0b_1x7')
          branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
                                 scope='Conv2d_0c_7x1')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(160), [1, 1],
                                 scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
                                 scope='Conv2d_0b_7x1')
          branch_2 = slim.conv2d(branch_2, depth(160), [1, 7],
                                 scope='Conv2d_0c_1x7')
          branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
                                 scope='Conv2d_0d_7x1')
          branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
                                 scope='Conv2d_0e_1x7')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
                                 scope='Conv2d_0b_1x1')
        net = tf.concat(
            axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # mixed_7: 17 x 17 x 768.
      end_point = 'Mixed_6e'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(192), [1, 1],
                                 scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(192), [1, 1],
                                 scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(192), [1, 7],
                                 scope='Conv2d_0b_1x7')
          branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
                                 scope='Conv2d_0c_7x1')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(192), [1, 1],
                                 scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(192), [7, 1],
                                 scope='Conv2d_0b_7x1')
          branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
                                 scope='Conv2d_0c_1x7')
          branch_2 = slim.conv2d(branch_2, depth(192), [7, 1],
                                 scope='Conv2d_0d_7x1')
          branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
                                 scope='Conv2d_0e_1x7')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
                                 scope='Conv2d_0b_1x1')
        net = tf.concat(
            axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # mixed_8: 8 x 8 x 1280.
      end_point = 'Mixed_7a'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(192), [1, 1],
                                 scope='Conv2d_0a_1x1')
          branch_0 = slim.conv2d(branch_0, depth(320), [3, 3], stride=2,
                                 padding='VALID', scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(192), [1, 1],
                                 scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(192), [1, 7],
                                 scope='Conv2d_0b_1x7')
          branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
                                 scope='Conv2d_0c_7x1')
          branch_1 = slim.conv2d(branch_1, depth(192), [3, 3], stride=2,
                                 padding='VALID', scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
                                     scope='MaxPool_1a_3x3')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
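
      # In Mixed_7b and Mixed_7c the 1 x 3 and 3 x 1 convolutions run in
      # parallel and are concatenated rather than stacked, widening the
      # filter bank on the coarse 8 x 8 grid as described in the paper.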
      # mixed_9: 8 x 8 x 2048.
      end_point = 'Mixed_7b'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(320), [1, 1],
                                 scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(384), [1, 1],
                                 scope='Conv2d_0a_1x1')
          branch_1 = tf.concat(axis=3, values=[
              slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
              slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0b_3x1')])
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(448), [1, 1],
                                 scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(
              branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = tf.concat(axis=3, values=[
              slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
              slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')])
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
        net = tf.concat(
            axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # mixed_10: 8 x 8 x 2048.
      end_point = 'Mixed_7c'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(320), [1, 1],
                                 scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(384), [1, 1],
                                 scope='Conv2d_0a_1x1')
          branch_1 = tf.concat(axis=3, values=[
              slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
              slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0c_3x1')])
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(448), [1, 1],
                                 scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(
              branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = tf.concat(axis=3, values=[
              slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
              slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')])
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
        net = tf.concat(
            axis=3, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
    raise ValueError('Unknown final endpoint %s' % final_endpoint)
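

# A minimal usage sketch for the base network (illustrative, not part of the
# module API; assumes a TF 1.x runtime with tf.contrib.slim available):
#
#   images = tf.placeholder(tf.float32, [None, 299, 299, 3])
#   with slim.arg_scope(inception_utils.inception_arg_scope()):
#     net, end_points = inception_v3_base(images, final_endpoint='Mixed_6e')
#   # net is the 17 x 17 x 768 'Mixed_6e' activation; end_points maps every
#   # endpoint constructed so far to its tensor.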


def inception_v3(inputs,
                 num_classes=1000,
                 is_training=True,
                 dropout_keep_prob=0.8,
                 min_depth=16,
                 depth_multiplier=1.0,
                 prediction_fn=slim.softmax,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='InceptionV3'):
  """Inception model from http://arxiv.org/abs/1512.00567.

  "Rethinking the Inception Architecture for Computer Vision"
  Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
  Zbigniew Wojna.

  With the default arguments this method constructs the exact model defined in
  the paper. However, one can experiment with variations of the inception_v3
  network by changing the arguments dropout_keep_prob, min_depth and
  depth_multiplier.

  The default image size used to train this network is 299x299.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether the model is being trained or not.
    dropout_keep_prob: the fraction of activation values that are retained.
    min_depth: Minimum depth value (number of channels) for all convolution
      ops. Enforced when depth_multiplier < 1, and not an active constraint
      when depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or the computation cost of the model.
    prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits has shape [B, C]; if False, logits has
      shape [B, 1, 1, C], where B is batch_size and C is the number of
      classes.
    reuse: whether or not the network and its variables should be reused. To
      be able to reuse them, 'scope' must be given.
    scope: Optional variable_scope.

  Returns:
    logits: the pre-softmax activations, a tensor of size
      [batch_size, num_classes].
    end_points: a dictionary from components of the network to the
      corresponding activation.

  Raises:
    ValueError: if 'depth_multiplier' is less than or equal to zero.
  """
  if depth_multiplier <= 0:
    raise ValueError('depth_multiplier is not greater than zero.')
  depth = lambda d: max(int(d * depth_multiplier), min_depth)

  with tf.variable_scope(scope, 'InceptionV3', [inputs, num_classes],
                         reuse=reuse) as scope:
    with slim.arg_scope([slim.batch_norm, slim.dropout],
                        is_training=is_training):
      net, end_points = inception_v3_base(
          inputs, scope=scope, min_depth=min_depth,
          depth_multiplier=depth_multiplier)
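
      # The auxiliary classifier taps 'Mixed_6e', the last 17 x 17 endpoint.
      # Per the paper, it acts mainly as a regularizer and matters chiefly
      # near the end of training.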
      # Auxiliary Head logits
      with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                          stride=1, padding='SAME'):
        aux_logits = end_points['Mixed_6e']
        with tf.variable_scope('AuxLogits'):
          aux_logits = slim.avg_pool2d(
              aux_logits, [5, 5], stride=3, padding='VALID',
              scope='AvgPool_1a_5x5')
          aux_logits = slim.conv2d(aux_logits, depth(128), [1, 1],
                                   scope='Conv2d_1b_1x1')

          # Shape of feature map before the final layer.
          kernel_size = _reduced_kernel_size_for_small_input(
              aux_logits, [5, 5])
          aux_logits = slim.conv2d(
              aux_logits, depth(768), kernel_size,
              weights_initializer=trunc_normal(0.01),
              padding='VALID', scope='Conv2d_2a_{}x{}'.format(*kernel_size))
          aux_logits = slim.conv2d(
              aux_logits, num_classes, [1, 1], activation_fn=None,
              normalizer_fn=None, weights_initializer=trunc_normal(0.001),
              scope='Conv2d_2b_1x1')
          if spatial_squeeze:
            aux_logits = tf.squeeze(aux_logits, [1, 2], name='SpatialSqueeze')
          end_points['AuxLogits'] = aux_logits

      # Final pooling and prediction
      with tf.variable_scope('Logits'):
        kernel_size = _reduced_kernel_size_for_small_input(net, [8, 8])
        net = slim.avg_pool2d(net, kernel_size, padding='VALID',
                              scope='AvgPool_1a_{}x{}'.format(*kernel_size))
        # 1 x 1 x 2048
        net = slim.dropout(net, keep_prob=dropout_keep_prob,
                           scope='Dropout_1b')
        end_points['PreLogits'] = net
        # 2048
        logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                             normalizer_fn=None, scope='Conv2d_1c_1x1')
        if spatial_squeeze:
          logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
        # 1000
      end_points['Logits'] = logits
      end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
  return logits, end_points


inception_v3.default_image_size = 299
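
# A sketch of building the full classifier (illustrative only; 'images' is any
# float tensor of shape [batch_size, 299, 299, 3], and num_classes=1001 is
# just an example value):
#
#   with slim.arg_scope(inception_v3_arg_scope()):
#     logits, end_points = inception_v3(images, num_classes=1001,
#                                       is_training=False)
#   predictions = end_points['Predictions']  # class probabilities
#   aux_logits = end_points['AuxLogits']     # auxiliary classifier output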


def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
  """Define kernel size which is automatically reduced for small input.

  If the shape of the input images is unknown at graph construction time this
  function assumes that the input images are large enough.

  Args:
    input_tensor: input tensor of size [batch_size, height, width, channels].
    kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]

  Returns:
    a two-element list with the (possibly reduced) kernel size.

  TODO(jrru): Make this function work with unknown shapes. Theoretically, this
  can be done with the code below. Problems are two-fold: (1) If the shape was
  known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
  handle tensors that define the kernel size.
      shape = tf.shape(input_tensor)
      return tf.stack([tf.minimum(shape[1], kernel_size[0]),
                       tf.minimum(shape[2], kernel_size[1])])
  """
  shape = input_tensor.get_shape().as_list()
  if shape[1] is None or shape[2] is None:
    kernel_size_out = kernel_size
  else:
    kernel_size_out = [min(shape[1], kernel_size[0]),
                       min(shape[2], kernel_size[1])]
  return kernel_size_out
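
# For example, if the network is fed images smaller than 299 x 299 so that the
# final feature map is only 5 x 5 spatially,
# _reduced_kernel_size_for_small_input(net, [8, 8]) returns [5, 5], and the
# average pool above covers the whole map instead of overrunning it.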


inception_v3_arg_scope = inception_utils.inception_arg_scope
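
# inception_v3_arg_scope supplies the shared hyperparameters (weight decay and
# batch-norm settings) that the convolutions above expect; wrapping graph
# construction in it, as sketched in the examples above, is the intended way
# to instantiate this network.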