# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for inception v3 classification network."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

slim = tf.contrib.slim
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)


def inception_v3_base(inputs,
                      final_endpoint='Mixed_7c',
                      min_depth=16,
                      depth_multiplier=1.0,
                      scope=None):
  """Inception model from http://arxiv.org/abs/1512.00567.

  Constructs an Inception v3 network from inputs to the given final endpoint.
  This method can construct the network up to the final inception block
  Mixed_7c.

  Note that the names of the layers in the paper do not correspond to the names
  of the endpoints registered by this function, although they build the same
  network.

  Here is a mapping from the old names to the new names:
  Old name          | New name
  =======================================
  conv0             | Conv2d_1a_3x3
  conv1             | Conv2d_2a_3x3
  conv2             | Conv2d_2b_3x3
  pool1             | MaxPool_3a_3x3
  conv3             | Conv2d_3b_1x1
  conv4             | Conv2d_4a_3x3
  pool2             | MaxPool_5a_3x3
  mixed_35x35x256a  | Mixed_5b
  mixed_35x35x288a  | Mixed_5c
  mixed_35x35x288b  | Mixed_5d
  mixed_17x17x768a  | Mixed_6a
  mixed_17x17x768b  | Mixed_6b
  mixed_17x17x768c  | Mixed_6c
  mixed_17x17x768d  | Mixed_6d
  mixed_17x17x768e  | Mixed_6e
  mixed_8x8x1280a   | Mixed_7a
  mixed_8x8x2048a   | Mixed_7b
  mixed_8x8x2048b   | Mixed_7c

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    final_endpoint: specifies the endpoint to construct the network up to. It
      can be one of ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
      'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3',
      'Mixed_5b', 'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c',
      'Mixed_6d', 'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c'].
    min_depth: Minimum depth value (number of channels) for all convolution
      ops. Enforced when depth_multiplier < 1, and not an active constraint
      when depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or computation cost of the model.
    scope: Optional variable_scope.

  Returns:
    tensor_out: output tensor corresponding to the final_endpoint.
    end_points: a set of activations for external use, for example summaries or
      losses.

  Raises:
    ValueError: if final_endpoint is not set to one of the predefined values,
      or depth_multiplier <= 0.
  """
  # end_points will collect relevant activations for external use, for example
  # summaries or losses.
  end_points = {}

  if depth_multiplier <= 0:
    raise ValueError('depth_multiplier is not greater than zero.')
  depth = lambda d: max(int(d * depth_multiplier), min_depth)

  with tf.variable_scope(scope, 'InceptionV3', [inputs]):
    with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                        stride=1, padding='VALID'):
      # 299 x 299 x 3
      end_point = 'Conv2d_1a_3x3'
      net = slim.conv2d(inputs, depth(32), [3, 3], stride=2, scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 149 x 149 x 32
      end_point = 'Conv2d_2a_3x3'
      net = slim.conv2d(net, depth(32), [3, 3], scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 147 x 147 x 32
      end_point = 'Conv2d_2b_3x3'
      net = slim.conv2d(net, depth(64), [3, 3], padding='SAME', scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 147 x 147 x 64
      end_point = 'MaxPool_3a_3x3'
      net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 73 x 73 x 64
      end_point = 'Conv2d_3b_1x1'
      net = slim.conv2d(net, depth(80), [1, 1], scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 73 x 73 x 80.
      end_point = 'Conv2d_4a_3x3'
      net = slim.conv2d(net, depth(192), [3, 3], scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 71 x 71 x 192.
      end_point = 'MaxPool_5a_3x3'
      net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 35 x 35 x 192.
    # Inception blocks
    with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                        stride=1, padding='SAME'):
      # mixed: 35 x 35 x 256.
      end_point = 'Mixed_5b'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],
                                 scope='Conv2d_0b_5x5')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(branch_3, depth(32), [1, 1],
                                 scope='Conv2d_0b_1x1')
        net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points

      # mixed_1: 35 x 35 x 288.
      end_point = 'Mixed_5c'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0b_1x1')
          branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],
                                 scope='Conv_1_0c_5x5')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(64), [1, 1],
                                 scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(branch_3, depth(64), [1, 1],
                                 scope='Conv2d_0b_1x1')
        net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points

      # mixed_2: 35 x 35 x 288.
      end_point = 'Mixed_5d'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],
                                 scope='Conv2d_0b_5x5')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(branch_3, depth(64), [1, 1],
                                 scope='Conv2d_0b_1x1')
        net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # mixed_3: 17 x 17 x 768.
      end_point = 'Mixed_6a'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(384), [3, 3], stride=2,
                                 padding='VALID', scope='Conv2d_1a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(96), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], stride=2,
                                 padding='VALID', scope='Conv2d_1a_1x1')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
                                     scope='MaxPool_1a_3x3')
        net = tf.concat(3, [branch_0, branch_1, branch_2])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points

      # mixed4: 17 x 17 x 768.
      end_point = 'Mixed_6b'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(128), [1, 7],
                                 scope='Conv2d_0b_1x7')
          branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
                                 scope='Conv2d_0c_7x1')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(128), [7, 1],
                                 scope='Conv2d_0b_7x1')
          branch_2 = slim.conv2d(branch_2, depth(128), [1, 7],
                                 scope='Conv2d_0c_1x7')
          branch_2 = slim.conv2d(branch_2, depth(128), [7, 1],
                                 scope='Conv2d_0d_7x1')
          branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
                                 scope='Conv2d_0e_1x7')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
                                 scope='Conv2d_0b_1x1')
        net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # mixed_5: 17 x 17 x 768.
      end_point = 'Mixed_6c'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(160), [1, 7],
                                 scope='Conv2d_0b_1x7')
          branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
                                 scope='Conv2d_0c_7x1')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
                                 scope='Conv2d_0b_7x1')
          branch_2 = slim.conv2d(branch_2, depth(160), [1, 7],
                                 scope='Conv2d_0c_1x7')
          branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
                                 scope='Conv2d_0d_7x1')
          branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
                                 scope='Conv2d_0e_1x7')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
                                 scope='Conv2d_0b_1x1')
        net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points

      # mixed_6: 17 x 17 x 768.
      end_point = 'Mixed_6d'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(160), [1, 7],
                                 scope='Conv2d_0b_1x7')
          branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
                                 scope='Conv2d_0c_7x1')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
                                 scope='Conv2d_0b_7x1')
          branch_2 = slim.conv2d(branch_2, depth(160), [1, 7],
                                 scope='Conv2d_0c_1x7')
          branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
                                 scope='Conv2d_0d_7x1')
          branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
                                 scope='Conv2d_0e_1x7')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
                                 scope='Conv2d_0b_1x1')
        net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points

      # mixed_7: 17 x 17 x 768.
      end_point = 'Mixed_6e'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(192), [1, 7],
                                 scope='Conv2d_0b_1x7')
          branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
                                 scope='Conv2d_0c_7x1')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(192), [7, 1],
                                 scope='Conv2d_0b_7x1')
          branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
                                 scope='Conv2d_0c_1x7')
          branch_2 = slim.conv2d(branch_2, depth(192), [7, 1],
                                 scope='Conv2d_0d_7x1')
          branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
                                 scope='Conv2d_0e_1x7')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
                                 scope='Conv2d_0b_1x1')
        net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # mixed_8: 8 x 8 x 1280.
      end_point = 'Mixed_7a'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
          branch_0 = slim.conv2d(branch_0, depth(320), [3, 3], stride=2,
                                 padding='VALID', scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(192), [1, 7],
                                 scope='Conv2d_0b_1x7')
          branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
                                 scope='Conv2d_0c_7x1')
          branch_1 = slim.conv2d(branch_1, depth(192), [3, 3], stride=2,
                                 padding='VALID', scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
                                     scope='MaxPool_1a_3x3')
        net = tf.concat(3, [branch_0, branch_1, branch_2])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points

      # mixed_9: 8 x 8 x 2048.
      end_point = 'Mixed_7b'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = tf.concat(3, [
              slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
              slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0b_3x1')])
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(
              branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = tf.concat(3, [
              slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
              slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')])
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
        net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points

      # mixed_10: 8 x 8 x 2048.
      end_point = 'Mixed_7c'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = tf.concat(3, [
              slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
              slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0c_3x1')])
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(
              branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = tf.concat(3, [
              slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
              slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')])
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
        net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
    raise ValueError('Unknown final endpoint %s' % final_endpoint)
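

# Illustrative sketch (editor's addition, not part of the original module):
# inception_v3_base can be used on its own as a feature extractor by requesting
# an intermediate endpoint. The placeholder name below is an assumption.
#
#   images = tf.placeholder(tf.float32, [None, 299, 299, 3], name='images')
#   with slim.arg_scope(inception_v3_arg_scope()):
#     features, end_points = inception_v3_base(images,
#                                               final_endpoint='Mixed_6e')
#   # features has spatial size 17 x 17 with depth 768; end_points also holds
#   # every earlier endpoint, e.g. end_points['Conv2d_4a_3x3'].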


def inception_v3(inputs,
                 num_classes=1000,
                 is_training=True,
                 dropout_keep_prob=0.8,
                 min_depth=16,
                 depth_multiplier=1.0,
                 prediction_fn=slim.softmax,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='InceptionV3'):
  """Inception model from http://arxiv.org/abs/1512.00567.

  "Rethinking the Inception Architecture for Computer Vision"
  Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
  Zbigniew Wojna.

  With the default arguments this method constructs the exact model defined in
  the paper. However, one can experiment with variations of the inception_v3
  network by changing the arguments dropout_keep_prob, min_depth and
  depth_multiplier.

  The default image size used to train this network is 299x299.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether the model is being trained or not.
    dropout_keep_prob: the percentage of activation values that are retained.
    min_depth: Minimum depth value (number of channels) for all convolution
      ops. Enforced when depth_multiplier < 1, and not an active constraint
      when depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or computation cost of the model.
    prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape [B, C]; if False, logits is
      of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse, 'scope' must be given.
    scope: Optional variable_scope.

  Returns:
    logits: the pre-softmax activations, a tensor of size
      [batch_size, num_classes].
    end_points: a dictionary from components of the network to the
      corresponding activation.

  Raises:
    ValueError: if 'depth_multiplier' is less than or equal to zero.
  """
  if depth_multiplier <= 0:
    raise ValueError('depth_multiplier is not greater than zero.')
  depth = lambda d: max(int(d * depth_multiplier), min_depth)

  with tf.variable_scope(scope, 'InceptionV3', [inputs, num_classes],
                         reuse=reuse) as scope:
    with slim.arg_scope([slim.batch_norm, slim.dropout],
                        is_training=is_training):
      net, end_points = inception_v3_base(
          inputs, scope=scope, min_depth=min_depth,
          depth_multiplier=depth_multiplier)

      # Auxiliary Head logits
      with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                          stride=1, padding='SAME'):
        aux_logits = end_points['Mixed_6e']
        with tf.variable_scope('AuxLogits'):
          aux_logits = slim.avg_pool2d(
              aux_logits, [5, 5], stride=3, padding='VALID',
              scope='AvgPool_1a_5x5')
          aux_logits = slim.conv2d(aux_logits, depth(128), [1, 1],
                                   scope='Conv2d_1b_1x1')

          # Shape of feature map before the final layer.
          kernel_size = _reduced_kernel_size_for_small_input(
              aux_logits, [5, 5])
          aux_logits = slim.conv2d(
              aux_logits, depth(768), kernel_size,
              weights_initializer=trunc_normal(0.01),
              padding='VALID', scope='Conv2d_2a_{}x{}'.format(*kernel_size))
          aux_logits = slim.conv2d(
              aux_logits, num_classes, [1, 1], activation_fn=None,
              normalizer_fn=None, weights_initializer=trunc_normal(0.001),
              scope='Conv2d_2b_1x1')
          if spatial_squeeze:
            aux_logits = tf.squeeze(aux_logits, [1, 2], name='SpatialSqueeze')
          end_points['AuxLogits'] = aux_logits

      # Final pooling and prediction
      with tf.variable_scope('Logits'):
        kernel_size = _reduced_kernel_size_for_small_input(net, [8, 8])
        net = slim.avg_pool2d(net, kernel_size, padding='VALID',
                              scope='AvgPool_1a_{}x{}'.format(*kernel_size))
        # 1 x 1 x 2048
        net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
        end_points['PreLogits'] = net
        # 2048
        logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                             normalizer_fn=None, scope='Conv2d_1c_1x1')
        if spatial_squeeze:
          logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
        # 1000
      end_points['Logits'] = logits
      end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
  return logits, end_points
inception_v3.default_image_size = 299
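

# Illustrative sketch (editor's addition, not part of the original module):
# building the full classifier for inference. The placeholder name below is an
# assumption; inception_v3_arg_scope is defined later in this file.
#
#   images = tf.placeholder(tf.float32, [None, 299, 299, 3], name='images')
#   with slim.arg_scope(inception_v3_arg_scope()):
#     logits, end_points = inception_v3(images, num_classes=1000,
#                                       is_training=False)
#   probabilities = end_points['Predictions']  # softmax over the classes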


def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
  """Define kernel size which is automatically reduced for small input.

  If the shape of the input images is unknown at graph construction time this
  function assumes that the input images are large enough.

  Args:
    input_tensor: input tensor of size [batch_size, height, width, channels].
    kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]

  Returns:
    a tensor with the kernel size.

  TODO(jrru): Make this function work with unknown shapes. Theoretically, this
  can be done with the code below. Problems are two-fold: (1) If the shape was
  known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
  handle tensors that define the kernel size.
      shape = tf.shape(input_tensor)
      return tf.pack([tf.minimum(shape[1], kernel_size[0]),
                      tf.minimum(shape[2], kernel_size[1])])
  """
  shape = input_tensor.get_shape().as_list()
  if shape[1] is None or shape[2] is None:
    kernel_size_out = kernel_size
  else:
    kernel_size_out = [min(shape[1], kernel_size[0]),
                       min(shape[2], kernel_size[1])]
  return kernel_size_out
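

# Worked example (editor's addition, illustrative): for the default 299x299
# input the final feature map is 8 x 8, so a requested [8, 8] kernel is
# returned unchanged; for a smaller input whose feature map is, say,
# [batch, 5, 5, 2048], the elementwise minimum reduces it to [5, 5]:
#
#   _reduced_kernel_size_for_small_input(net, [8, 8])  # -> [5, 5]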


def inception_v3_arg_scope(weight_decay=0.00004,
                           stddev=0.1):
  """Defines the default InceptionV3 arg scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    stddev: The standard deviation of the truncated normal weight initializer.

  Returns:
    An `arg_scope` to use for the inception v3 model.
  """
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': 0.9997,
      # epsilon to prevent 0s in variance.
      'epsilon': 0.001,
      # collection containing update_ops.
      'updates_collections': tf.GraphKeys.UPDATE_OPS,
  }

  # Set weight_decay for weights in Conv and FC layers.
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      weights_regularizer=slim.l2_regularizer(weight_decay)):
    with slim.arg_scope(
        [slim.conv2d],
        weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
        activation_fn=tf.nn.relu,
        normalizer_fn=slim.batch_norm,
        normalizer_params=batch_norm_params) as sc:
      return sc
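

# Illustrative sketch (editor's addition, not part of the original module):
# because batch_norm in this arg_scope places its moving-average updates in
# tf.GraphKeys.UPDATE_OPS, a training setup is expected to run those ops
# alongside the optimizer step. The loss and optimizer below are assumptions.
#
#   update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
#   with tf.control_dependencies(update_ops):
#     train_op = tf.train.GradientDescentOptimizer(0.01).minimize(total_loss)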