# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for inception v2 classification network."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from nets import inception_utils

slim = tf.contrib.slim
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
def inception_v2_base(inputs,
                      final_endpoint='Mixed_5c',
                      min_depth=16,
                      depth_multiplier=1.0,
                      use_separable_conv=True,
                      data_format='NHWC',
                      scope=None):
  """Inception v2 (6a2).

  Constructs an Inception v2 network from inputs to the given final endpoint.
  This method can construct the network up to the layer inception(5b) as
  described in http://arxiv.org/abs/1502.03167.

  Args:
    inputs: a tensor of shape [batch_size, height, width, channels].
    final_endpoint: specifies the endpoint to construct the network up to. It
      can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
      'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'Mixed_4a',
      'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_5a', 'Mixed_5b',
      'Mixed_5c'].
    min_depth: Minimum depth value (number of channels) for all convolution ops.
      Enforced when depth_multiplier < 1, and not an active constraint when
      depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or computation cost of the model.
    use_separable_conv: Use a separable convolution for the first layer
      Conv2d_1a_7x7. If this is False, use a normal convolution instead.
    data_format: Data format of the activations ('NHWC' or 'NCHW').
    scope: Optional variable_scope.

  Returns:
    tensor_out: output tensor corresponding to the final_endpoint.
    end_points: a set of activations for external use, for example summaries or
      losses.

  Raises:
    ValueError: if final_endpoint is not set to one of the predefined values,
      or depth_multiplier <= 0
  """
  # end_points will collect relevant activations for external use, for example
  # summaries or losses.
  end_points = {}

  # Used to find thinned depths for each layer.
  if depth_multiplier <= 0:
    raise ValueError('depth_multiplier is not greater than zero.')
  depth = lambda d: max(int(d * depth_multiplier), min_depth)
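  # Illustrative arithmetic (editorial note, assuming depth_multiplier=0.5 and
  # the default min_depth=16): depth(64) == max(int(64 * 0.5), 16) == 32, while
  # depth(16) == max(8, 16) == 16, so very thin layers are clamped at min_depth.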

  if data_format != 'NHWC' and data_format != 'NCHW':
    raise ValueError('data_format must be either NHWC or NCHW.')

  if data_format == 'NCHW' and use_separable_conv:
    raise ValueError(
        'separable convolution only supports NHWC layout. NCHW data format can'
        ' only be used when use_separable_conv is False.'
    )

  concat_dim = 3 if data_format == 'NHWC' else 1
  with tf.variable_scope(scope, 'InceptionV2', [inputs]):
    with slim.arg_scope(
        [slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
        stride=1,
        padding='SAME',
        data_format=data_format):

      # Note that sizes in the comments below assume an input spatial size of
      # 224x224; however, the inputs can be of any size greater than 32x32.

      # 224 x 224 x 3
      end_point = 'Conv2d_1a_7x7'

      if use_separable_conv:
        # depthwise_multiplier here is different from depth_multiplier.
        # depthwise_multiplier determines the output channels of the initial
        # depthwise conv (see docs for tf.nn.separable_conv2d), while
        # depth_multiplier controls the # channels of the subsequent 1x1
        # convolution. Must have
        #   in_channels * depthwise_multiplier <= out_channels
        # so that the separable convolution is not overparameterized.
        depthwise_multiplier = min(int(depth(64) / 3), 8)
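        # Illustrative arithmetic (editorial note, assuming the default
        # depth_multiplier=1.0): min(int(64 / 3), 8) == min(21, 8) == 8, so the
        # depthwise stage emits 3 * 8 = 24 channels before the pointwise
        # projection to depth(64) = 64, satisfying the constraint above.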
        net = slim.separable_conv2d(
            inputs, depth(64), [7, 7],
            depth_multiplier=depthwise_multiplier,
            stride=2,
            padding='SAME',
            weights_initializer=trunc_normal(1.0),
            scope=end_point)
      else:
        # Use a normal convolution instead of a separable convolution.
        net = slim.conv2d(
            inputs,
            depth(64), [7, 7],
            stride=2,
            weights_initializer=trunc_normal(1.0),
            scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 112 x 112 x 64
      end_point = 'MaxPool_2a_3x3'
      net = slim.max_pool2d(net, [3, 3], scope=end_point, stride=2)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 56 x 56 x 64
      end_point = 'Conv2d_2b_1x1'
      net = slim.conv2d(net, depth(64), [1, 1], scope=end_point,
                        weights_initializer=trunc_normal(0.1))
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 56 x 56 x 64
      end_point = 'Conv2d_2c_3x3'
      net = slim.conv2d(net, depth(192), [3, 3], scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 56 x 56 x 192
      end_point = 'MaxPool_3a_3x3'
      net = slim.max_pool2d(net, [3, 3], scope=end_point, stride=2)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 28 x 28 x 192
      # Inception module.
      end_point = 'Mixed_3b'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(64), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(64), [3, 3],
                                 scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(
              net, depth(64), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(32), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = tf.concat(
            axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
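      # Channel bookkeeping (editorial note, with depth_multiplier=1.0):
      # Mixed_3b concatenates 64 + 64 + 96 + 32 = 256 channels, matching the
      # "28 x 28 x 256" size noted below.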
      # 28 x 28 x 256
      end_point = 'Mixed_3c'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(64), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(96), [3, 3],
                                 scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(
              net, depth(64), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(64), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = tf.concat(
            axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 28 x 28 x 320
      end_point = 'Mixed_4a'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(
              net, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_0 = slim.conv2d(branch_0, depth(160), [3, 3], stride=2,
                                 scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(64), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(
              branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
          branch_1 = slim.conv2d(
              branch_1, depth(96), [3, 3], stride=2, scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.max_pool2d(
              net, [3, 3], stride=2, scope='MaxPool_1a_3x3')
        net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
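      # Channel bookkeeping (editorial note, with depth_multiplier=1.0): the
      # two stride-2 branches emit 160 and 96 channels, and the max pool
      # carries the 320 input channels through, giving 160 + 96 + 320 = 576
      # channels at 14 x 14.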
      # 14 x 14 x 576
      end_point = 'Mixed_4b'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(224), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(64), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(
              branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(
              net, depth(96), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(128), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(128), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = tf.concat(
            axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 14 x 14 x 576
      end_point = 'Mixed_4c'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(96), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(128), [3, 3],
                                 scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(
              net, depth(96), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(128), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(128), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = tf.concat(
            axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 14 x 14 x 576
      end_point = 'Mixed_4d'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(160), [3, 3],
                                 scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(
              net, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(160), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(160), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(96), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = tf.concat(
            axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 14 x 14 x 576
      end_point = 'Mixed_4e'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(96), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(192), [3, 3],
                                 scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(
              net, depth(160), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(192), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(192), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(96), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = tf.concat(
            axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 14 x 14 x 576
      end_point = 'Mixed_5a'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(
              net, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_0 = slim.conv2d(branch_0, depth(192), [3, 3], stride=2,
                                 scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(192), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(256), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], stride=2,
                                 scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.max_pool2d(net, [3, 3], stride=2,
                                     scope='MaxPool_1a_3x3')
        net = tf.concat(
            axis=concat_dim, values=[branch_0, branch_1, branch_2])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 7 x 7 x 1024
      end_point = 'Mixed_5b'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(192), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(320), [3, 3],
                                 scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(
              net, depth(160), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = tf.concat(
            axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 7 x 7 x 1024
      end_point = 'Mixed_5c'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(192), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(320), [3, 3],
                                 scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(
              net, depth(192), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = tf.concat(
            axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3])
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
  raise ValueError('Unknown final endpoint %s' % final_endpoint)
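

# --- Hedged usage sketch (editorial addition, not part of the published model
# definition). It shows how inception_v2_base might be called to build the
# trunk up to an intermediate endpoint; the placeholder shape and the choice of
# 'Mixed_4e' are assumptions for illustration only.
def _example_inception_v2_base_usage():
  """Sketch: build the base network up to 'Mixed_4e' and inspect endpoints."""
  images = tf.placeholder(tf.float32, [None, 224, 224, 3], name='images')
  with slim.arg_scope(inception_utils.inception_arg_scope()):
    net, end_points = inception_v2_base(images, final_endpoint='Mixed_4e')
  # 'net' is the Mixed_4e activation (14 x 14 spatially for 224x224 inputs);
  # 'end_points' maps every endpoint constructed so far to its tensor.
  return net, end_points
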
def inception_v2(inputs,
                 num_classes=1000,
                 is_training=True,
                 dropout_keep_prob=0.8,
                 min_depth=16,
                 depth_multiplier=1.0,
                 prediction_fn=slim.softmax,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='InceptionV2',
                 global_pool=False):
  """Inception v2 model for classification.

  Constructs an Inception v2 network for classification as described in
  http://arxiv.org/abs/1502.03167.

  The default image size used to train this network is 224x224.

  Args:
    inputs: a tensor of shape [batch_size, height, width, channels].
    num_classes: number of predicted classes. If 0 or None, the logits layer
      is omitted and the input features to the logits layer (before dropout)
      are returned instead.
    is_training: whether the model is being used for training or not.
    dropout_keep_prob: the percentage of activation values that are retained.
    min_depth: Minimum depth value (number of channels) for all convolution ops.
      Enforced when depth_multiplier < 1, and not an active constraint when
      depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or computation cost of the model.
    prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape [B, C]; if False, logits is of
      shape [B, 1, 1, C], where B is batch_size and C is number of classes.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse, 'scope' must be given.
    scope: Optional variable_scope.
    global_pool: Optional boolean flag to control the avgpooling before the
      logits layer. If false or unset, pooling is done with a fixed window
      that reduces default-sized inputs to 1x1, while larger inputs lead to
      larger outputs. If true, any input size is pooled down to 1x1.

  Returns:
    net: a Tensor with the logits (pre-softmax activations) if num_classes
      is a non-zero integer, or the non-dropped-out input to the logits layer
      if num_classes is 0 or None.
    end_points: a dictionary from components of the network to the corresponding
      activation.

  Raises:
    ValueError: if depth_multiplier <= 0.
  """
  if depth_multiplier <= 0:
    raise ValueError('depth_multiplier is not greater than zero.')

  # Final pooling and prediction
  with tf.variable_scope(scope, 'InceptionV2', [inputs], reuse=reuse) as scope:
    with slim.arg_scope([slim.batch_norm, slim.dropout],
                        is_training=is_training):
      net, end_points = inception_v2_base(
          inputs, scope=scope, min_depth=min_depth,
          depth_multiplier=depth_multiplier)
      with tf.variable_scope('Logits'):
        if global_pool:
          # Global average pooling.
          net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
          end_points['global_pool'] = net
        else:
          # Pooling with a fixed kernel size.
          kernel_size = _reduced_kernel_size_for_small_input(net, [7, 7])
          net = slim.avg_pool2d(net, kernel_size, padding='VALID',
                                scope='AvgPool_1a_{}x{}'.format(*kernel_size))
          end_points['AvgPool_1a'] = net
        if not num_classes:
          return net, end_points
        # 1 x 1 x 1024
        net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
        logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                             normalizer_fn=None, scope='Conv2d_1c_1x1')
        if spatial_squeeze:
          logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
      end_points['Logits'] = logits
      end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
  return logits, end_points
inception_v2.default_image_size = 224
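

# --- Hedged usage sketch (editorial addition): building the full classifier.
# Wrapping the call in the slim arg scope supplies the batch-norm and weight
# regularization defaults from inception_utils; the batch size and the class
# count of 1001 below are assumptions for illustration only.
def _example_inception_v2_classification():
  """Sketch: logits and predictions for a batch of 224x224 RGB images."""
  images = tf.placeholder(
      tf.float32,
      [None, inception_v2.default_image_size, inception_v2.default_image_size,
       3])
  with slim.arg_scope(inception_utils.inception_arg_scope()):
    logits, end_points = inception_v2(images, num_classes=1001,
                                      is_training=False)
  # With spatial_squeeze=True (the default), logits has shape
  # [batch_size, num_classes]; end_points['Predictions'] is the softmax output.
  return logits, end_points
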
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
  """Define kernel size which is automatically reduced for small input.

  If the shape of the input images is unknown at graph construction time this
  function assumes that the input images are large enough.

  Args:
    input_tensor: input tensor of size [batch_size, height, width, channels].
    kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]

  Returns:
    a tensor with the kernel size.

  TODO(jrru): Make this function work with unknown shapes. Theoretically, this
  can be done with the code below. Problems are two-fold: (1) If the shape was
  known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
  handle tensors that define the kernel size.
      shape = tf.shape(input_tensor)
      return tf.stack([tf.minimum(shape[1], kernel_size[0]),
                       tf.minimum(shape[2], kernel_size[1])])
  """
  shape = input_tensor.get_shape().as_list()
  if shape[1] is None or shape[2] is None:
    kernel_size_out = kernel_size
  else:
    kernel_size_out = [min(shape[1], kernel_size[0]),
                       min(shape[2], kernel_size[1])]
  return kernel_size_out


inception_v2_arg_scope = inception_utils.inception_arg_scope
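

# --- Hedged sketch (editorial addition) of the fixed-kernel pooling fallback:
# for inputs smaller than the default 224x224, the helper above shrinks the
# 7x7 average-pool window to the feature-map size. The 112x112 input below is
# an assumption chosen for illustration.
def _example_reduced_kernel_size():
  """Sketch: the [7, 7] pool window shrinks to [4, 4] for a 112x112 input."""
  small_images = tf.placeholder(tf.float32, [1, 112, 112, 3])
  with slim.arg_scope(inception_v2_arg_scope()):
    net, _ = inception_v2_base(small_images)  # Mixed_5c is 4 x 4 spatially.
  kernel_size = _reduced_kernel_size_for_small_input(net, [7, 7])
  # kernel_size == [4, 4]: min(feature height, 7) and min(feature width, 7).
  return kernel_size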