shape_utils.py

# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils used to manipulate tensor shapes."""

import tensorflow as tf

from segment.sheet_resolve.lib.ssd_model.utils import static_shape


def _is_tensor(t):
  """Returns a boolean indicating whether the input is a tensor.

  Args:
    t: the input to be tested.

  Returns:
    a boolean that indicates whether t is a tensor.
  """
  return isinstance(t, (tf.Tensor, tf.SparseTensor, tf.Variable))


def _set_dim_0(t, d0):
  """Sets the 0-th dimension of the input tensor.

  Args:
    t: the input tensor, assuming the rank is at least 1.
    d0: an integer indicating the 0-th dimension of the input tensor.

  Returns:
    the tensor t with the 0-th dimension set.
  """
  t_shape = t.get_shape().as_list()
  t_shape[0] = d0
  t.set_shape(t_shape)
  return t


def pad_tensor(t, length):
  """Pads the input tensor with 0s along the first dimension up to the length.

  Args:
    t: the input tensor, assuming the rank is at least 1.
    length: a tensor of shape [1] or an integer, indicating the first dimension
      of the input tensor t after padding, assuming length >= t.shape[0].

  Returns:
    padded_t: the padded tensor, whose first dimension is length. If the length
      is an integer, the first dimension of padded_t is set to length
      statically.
  """
  t_rank = tf.rank(t)
  t_shape = tf.shape(t)
  t_d0 = t_shape[0]
  pad_d0 = tf.expand_dims(length - t_d0, 0)
  pad_shape = tf.cond(
      tf.greater(t_rank, 1), lambda: tf.concat([pad_d0, t_shape[1:]], 0),
      lambda: tf.expand_dims(length - t_d0, 0))
  padded_t = tf.concat([t, tf.zeros(pad_shape, dtype=t.dtype)], 0)
  if not _is_tensor(length):
    padded_t = _set_dim_0(padded_t, length)
  return padded_t
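

# Usage sketch for pad_tensor (illustrative only; assumes eager execution so
# values can be inspected directly):
#
#   t = tf.constant([[1, 2], [3, 4]])  # shape [2, 2]
#   padded = pad_tensor(t, 4)          # shape [4, 2]; rows 2 and 3 are zeros
#   # `length` is a Python int here, so the first dimension of `padded` is
#   # also set statically to 4 via _set_dim_0.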


def clip_tensor(t, length):
  """Clips the input tensor along the first dimension up to the length.

  Args:
    t: the input tensor, assuming the rank is at least 1.
    length: a tensor of shape [1] or an integer, indicating the first dimension
      of the input tensor t after clipping, assuming length <= t.shape[0].

  Returns:
    clipped_t: the clipped tensor, whose first dimension is length. If the
      length is an integer, the first dimension of clipped_t is set to length
      statically.
  """
  clipped_t = tf.gather(t, tf.range(length))
  if not _is_tensor(length):
    clipped_t = _set_dim_0(clipped_t, length)
  return clipped_t
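

# Usage sketch for clip_tensor (illustrative only, same assumptions as above):
#
#   t = tf.constant([1, 2, 3, 4, 5])
#   clipped = clip_tensor(t, 3)  # -> [1, 2, 3], first dimension set to 3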


def pad_or_clip_tensor(t, length):
  """Pad or clip the input tensor along the first dimension.

  Args:
    t: the input tensor, assuming the rank is at least 1.
    length: a tensor of shape [1] or an integer, indicating the first dimension
      of the input tensor t after processing.

  Returns:
    processed_t: the processed tensor, whose first dimension is length. If the
      length is an integer, the first dimension of the processed tensor is set
      to length statically.
  """
  return pad_or_clip_nd(t, [length] + t.shape.as_list()[1:])
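

# Usage sketch for pad_or_clip_tensor (illustrative only): the decision to pad
# or clip is made from the runtime size of dimension 0, so a single call
# covers both cases:
#
#   pad_or_clip_tensor(tf.constant([1, 2]), 4)        # -> [1, 2, 0, 0]
#   pad_or_clip_tensor(tf.constant([1, 2, 3, 4]), 2)  # -> [1, 2]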


def pad_or_clip_nd(tensor, output_shape):
  """Pad or clip given tensor to the output shape.

  Args:
    tensor: Input tensor to pad or clip.
    output_shape: A list of integers / scalar tensors (or None for dynamic dim)
      representing the size to pad or clip each dimension of the input tensor.

  Returns:
    Input tensor padded and clipped to the output shape.
  """
  tensor_shape = tf.shape(tensor)
  clip_size = [
      tf.where(tensor_shape[i] - shape > 0, shape, -1)
      if shape is not None else -1 for i, shape in enumerate(output_shape)
  ]
  clipped_tensor = tf.slice(
      tensor,
      begin=tf.zeros(len(clip_size), dtype=tf.int32),
      size=clip_size)

  # Pad tensor if the shape of the clipped tensor is smaller than the expected
  # shape.
  clipped_tensor_shape = tf.shape(clipped_tensor)
  trailing_paddings = [
      shape - clipped_tensor_shape[i] if shape is not None else 0
      for i, shape in enumerate(output_shape)
  ]
  paddings = tf.stack(
      [
          tf.zeros(len(trailing_paddings), dtype=tf.int32),
          trailing_paddings
      ],
      axis=1)
  padded_tensor = tf.pad(clipped_tensor, paddings=paddings)
  output_static_shape = [
      dim if not isinstance(dim, tf.Tensor) else None for dim in output_shape
  ]
  padded_tensor.set_shape(output_static_shape)
  return padded_tensor
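

# Usage sketch for pad_or_clip_nd (illustrative only): each dimension is
# clipped and/or zero-padded independently to match `output_shape`:
#
#   x = tf.ones([3, 5], dtype=tf.int32)
#   y = pad_or_clip_nd(x, [4, 2])
#   # y has shape [4, 2]: dim 0 is padded from 3 to 4 (the last row is zeros)
#   # and dim 1 is clipped from 5 to 2.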


def combined_static_and_dynamic_shape(tensor):
  """Returns a list containing static and dynamic values for the dimensions.

  Returns a list of static and dynamic values for shape dimensions. This is
  useful to preserve static shapes, when available, in a reshape operation.

  Args:
    tensor: A tensor of any type.

  Returns:
    A list of size tensor.shape.ndims containing integers or a scalar tensor.
  """
  static_tensor_shape = tensor.shape.as_list()
  dynamic_tensor_shape = tf.shape(tensor)
  combined_shape = []
  for index, dim in enumerate(static_tensor_shape):
    if dim is not None:
      combined_shape.append(dim)
    else:
      combined_shape.append(dynamic_tensor_shape[index])
  return combined_shape
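

# Usage sketch for combined_static_and_dynamic_shape (illustrative only,
# TF1-style graph mode where the batch dimension is unknown):
#
#   images = tf.placeholder(tf.float32, [None, 300, 300, 3])
#   shape = combined_static_and_dynamic_shape(images)
#   # shape == [<scalar int32 Tensor>, 300, 300, 3]; passing it to tf.reshape
#   # keeps the three static dimensions in the result's static shape.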


def static_or_dynamic_map_fn(fn, elems, dtype=None,
                             parallel_iterations=32, back_prop=True):
  """Runs map_fn as a (static) for loop when possible.

  This function rewrites the map_fn as an explicit unstack input -> for loop
  over function calls -> stack result combination. This allows our graphs to
  be acyclic when the batch size is static.
  For comparison, see https://www.tensorflow.org/api_docs/python/tf/map_fn.

  Note that `static_or_dynamic_map_fn` currently is not *fully* interchangeable
  with the default tf.map_fn function as it does not accept nested inputs (only
  Tensors or lists of Tensors). Likewise, the output of `fn` can only be a
  Tensor or a list of Tensors.

  TODO(jonathanhuang): make this function fully interchangeable with tf.map_fn.

  Args:
    fn: The callable to be performed. It accepts one argument, which will have
      the same structure as elems. Its output must have the same structure as
      elems.
    elems: A tensor or list of tensors, each of which will be unpacked along
      their first dimension. The sequence of the resulting slices will be
      applied to fn.
    dtype: (optional) The output type(s) of fn. If fn returns a structure of
      Tensors differing from the structure of elems, then dtype is not optional
      and must have the same structure as the output of fn.
    parallel_iterations: (optional) number of batch items to process in
      parallel. This flag is only used if the native tf.map_fn is used
      and defaults to 32 instead of 10 (unlike the standard tf.map_fn default).
    back_prop: (optional) True enables support for back propagation.
      This flag is only used if the native tf.map_fn is used.

  Returns:
    A tensor or sequence of tensors. Each tensor packs the
    results of applying fn to tensors unpacked from elems along the first
    dimension, from first to last.

  Raises:
    ValueError: if `elems` is not a Tensor or a list of Tensors.
    ValueError: if `fn` does not return a Tensor or a list of Tensors.
  """
  if isinstance(elems, list):
    for elem in elems:
      if not isinstance(elem, tf.Tensor):
        raise ValueError('`elems` must be a Tensor or list of Tensors.')

    elem_shapes = [elem.shape.as_list() for elem in elems]
    # Fall back on tf.map_fn if shapes of each entry of `elems` are None or
    # fail to all be the same size along the batch dimension.
    for elem_shape in elem_shapes:
      if (not elem_shape or not elem_shape[0]
          or elem_shape[0] != elem_shapes[0][0]):
        return tf.map_fn(fn, elems, dtype, parallel_iterations, back_prop)
    arg_tuples = zip(*[tf.unstack(elem) for elem in elems])
    outputs = [fn(arg_tuple) for arg_tuple in arg_tuples]
  else:
    if not isinstance(elems, tf.Tensor):
      raise ValueError('`elems` must be a Tensor or list of Tensors.')
    elems_shape = elems.shape.as_list()
    if not elems_shape or not elems_shape[0]:
      return tf.map_fn(fn, elems, dtype, parallel_iterations, back_prop)
    outputs = [fn(arg) for arg in tf.unstack(elems)]
  # Stack `outputs`, which is a list of Tensors or a list of lists of Tensors.
  if all([isinstance(output, tf.Tensor) for output in outputs]):
    return tf.stack(outputs)
  else:
    if all([isinstance(output, list) for output in outputs]):
      if all([all([isinstance(entry, tf.Tensor) for entry in output_list])
              for output_list in outputs]):
        return [tf.stack(output_tuple) for output_tuple in zip(*outputs)]
  raise ValueError('`fn` should return a Tensor or a list of Tensors.')
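

# Usage sketch for static_or_dynamic_map_fn (illustrative only): with a static
# batch dimension the map is unrolled into a Python loop over unstacked
# slices; with an unknown batch dimension it falls back to tf.map_fn:
#
#   boxes = tf.constant([[0., 0., 1., 1.],
#                        [0., 0., 2., 2.]])  # static shape [2, 4]
#   scaled = static_or_dynamic_map_fn(lambda b: b * 2.0, boxes)
#   # Equivalent here to tf.stack([fn(s) for s in tf.unstack(boxes)]).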


def check_min_image_dim(min_dim, image_tensor):
  """Checks that the image width/height are greater than some number.

  This function is used to check that the width and height of an image are
  above a certain value. If the image shape is static, this function will
  perform the check at graph construction time. Otherwise, if the image shape
  varies, an Assertion control dependency will be added to the graph.

  Args:
    min_dim: The minimum number of pixels along the width and height of the
      image.
    image_tensor: The image tensor to check size for.

  Returns:
    If `image_tensor` has dynamic size, return `image_tensor` with an Assert
    control dependency. Otherwise returns image_tensor.

  Raises:
    ValueError: if `image_tensor`'s width or height is smaller than `min_dim`.
  """
  image_shape = image_tensor.get_shape()
  image_height = static_shape.get_height(image_shape)
  image_width = static_shape.get_width(image_shape)
  if image_height is None or image_width is None:
    shape_assert = tf.Assert(
        tf.logical_and(tf.greater_equal(tf.shape(image_tensor)[1], min_dim),
                       tf.greater_equal(tf.shape(image_tensor)[2], min_dim)),
        ['image size must be >= {} in both height and width.'.format(min_dim)])
    with tf.control_dependencies([shape_assert]):
      return tf.identity(image_tensor)

  if image_height < min_dim or image_width < min_dim:
    raise ValueError(
        'image size must be >= %d in both height and width; image dim = %d,%d'
        % (min_dim, image_height, image_width))
  return image_tensor
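

# Usage sketch for check_min_image_dim (illustrative only): with a statically
# shaped [batch, height, width, channels] input the check runs at graph
# construction time:
#
#   images = tf.zeros([1, 224, 224, 3])
#   images = check_min_image_dim(33, images)  # OK, returned unchanged
#   check_min_image_dim(300, images)          # raises ValueError (224 < 300)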


def assert_shape_equal(shape_a, shape_b):
  """Asserts that shape_a and shape_b are equal.

  If the shapes are static, raises a ValueError when the shapes
  mismatch.

  If the shapes are dynamic, raises a tf InvalidArgumentError when the shapes
  mismatch.

  Args:
    shape_a: a list containing shape of the first tensor.
    shape_b: a list containing shape of the second tensor.

  Returns:
    Either a tf.no_op() when shapes are all static or a tf.assert_equal() op
    when the shapes are dynamic.

  Raises:
    ValueError: When shapes are both static and unequal.
  """
  if (all(isinstance(dim, int) for dim in shape_a) and
      all(isinstance(dim, int) for dim in shape_b)):
    if shape_a != shape_b:
      raise ValueError('Unequal shapes {}, {}'.format(shape_a, shape_b))
    else:
      return tf.no_op()
  else:
    return tf.assert_equal(shape_a, shape_b)


def assert_shape_equal_along_first_dimension(shape_a, shape_b):
  """Asserts that shape_a and shape_b are the same along the 0th-dimension.

  If the shapes are static, raises a ValueError when the shapes
  mismatch.

  If the shapes are dynamic, raises a tf InvalidArgumentError when the shapes
  mismatch.

  Args:
    shape_a: a list containing shape of the first tensor.
    shape_b: a list containing shape of the second tensor.

  Returns:
    Either a tf.no_op() when shapes are all static or a tf.assert_equal() op
    when the shapes are dynamic.

  Raises:
    ValueError: When shapes are both static and unequal.
  """
  if isinstance(shape_a[0], int) and isinstance(shape_b[0], int):
    if shape_a[0] != shape_b[0]:
      raise ValueError('Unequal first dimension {}, {}'.format(
          shape_a[0], shape_b[0]))
    else:
      return tf.no_op()
  else:
    return tf.assert_equal(shape_a[0], shape_b[0])
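

# Usage sketch for the two assert helpers (illustrative only; `tensor_a` and
# `tensor_b` stand for any two tensors): the inputs are shape lists such as
# those produced by combined_static_and_dynamic_shape, so fully static shapes
# are compared in Python while dynamic ones become a runtime assert op:
#
#   shape_a = combined_static_and_dynamic_shape(tensor_a)
#   shape_b = combined_static_and_dynamic_shape(tensor_b)
#   assert_op = assert_shape_equal_along_first_dimension(shape_a, shape_b)
#   with tf.control_dependencies([assert_op]):
#     tensor_a = tf.identity(tensor_a)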


def assert_box_normalized(boxes, maximum_normalized_coordinate=1.1):
  """Asserts the input box tensor is normalized.

  Args:
    boxes: a tensor of shape [N, 4] where N is the number of boxes.
    maximum_normalized_coordinate: Maximum coordinate value to be considered
      as normalized, defaults to 1.1.

  Returns:
    a tf.Assert op which fails when the input box tensor is not normalized.
  """
  box_minimum = tf.reduce_min(boxes)
  box_maximum = tf.reduce_max(boxes)
  return tf.Assert(
      tf.logical_and(
          tf.less_equal(box_maximum, maximum_normalized_coordinate),
          tf.greater_equal(box_minimum, 0)),
      [boxes])
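

# Usage sketch for assert_box_normalized (illustrative only): the returned op
# only fires when it is evaluated, so it is typically attached as a control
# dependency:
#
#   boxes = tf.constant([[0.0, 0.1, 0.9, 1.0]])
#   with tf.control_dependencies([assert_box_normalized(boxes)]):
#     boxes = tf.identity(boxes)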