Commit d40155f0 authored by Jaesung Chung, committed by TensorFlower Gardener

Do not allow dilated convolution when there are dynamic dims at either the first or the last dim.

Also fixed the shape inference code for SpaceToBatchNDOp so that it correctly handles inputs that contain dynamic dimensions.

PiperOrigin-RevId: 340226617
Change-Id: I054f4ba794b5eb206dba69a67a80eb25514b4a27
Parent 8520302c
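The tests and the SpaceToBatchND shape-inference change below rely on the op's standard shape rule: each spatial output dimension is (input + pad_begin + pad_end) / block, and the output batch is the input batch times the product of the block sizes. For example, a 1x128x128x3 input with block [2, 2] and a padding of 4 on each side gives (128 + 4 + 4) / 2 = 68 per spatial dimension and a batch of 1 * 2 * 2 = 4, i.e. the 4x68x68x3 intermediate type that appears in the tests.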
@@ -303,3 +303,50 @@ func @testDilatedConvWithDifferentExpandSqueezeAxis(%arg0: tensor<1x128x128xf32>
  // CHECK-NEXT: [[RESULT:%.*]] = "tf.BatchToSpaceND"
  // CHECK-NEXT: return [[RESULT]]
}

func @testNoDilatedConvWhenFirstDimIsDynamic(%arg0: tensor<?x128x128x3xf32>, %arg1: tensor<5x5x3x8xf32>) -> tensor<?x128x128x8xf32> {
  %cst = constant dense<[2, 2]> : tensor<2xi32>
  %cst_0 = constant dense<4> : tensor<2x2xi32>
  %cst_1 = constant dense<0> : tensor<2x2xi32>
  %0 = "tf.SpaceToBatchND"(%arg0, %cst, %cst_0) : (tensor<?x128x128x3xf32>, tensor<2xi32>, tensor<2x2xi32>) -> tensor<?x68x68x3xf32>
  %1 = "tf.Conv2D"(%0, %arg1) {padding = "VALID", strides = [1, 1, 1, 1]} : (tensor<?x68x68x3xf32>, tensor<5x5x3x8xf32>) -> tensor<?x64x64x8xf32>
  %2 = "tf.BatchToSpaceND"(%1, %cst, %cst_1) : (tensor<?x64x64x8xf32>, tensor<2xi32>, tensor<2x2xi32>) -> tensor<?x128x128x8xf32>
  return %2 : tensor<?x128x128x8xf32>

  // CHECK-LABEL: testNoDilatedConvWhenFirstDimIsDynamic
  // CHECK: [[STB:%.*]] = "tf.SpaceToBatchND"
  // CHECK-NEXT: [[CONV:%.*]] = "tf.Conv2D"
  // CHECK-NEXT: [[RESULT:%.*]] = "tf.BatchToSpaceND"
  // CHECK-NEXT: return [[RESULT]]
}

func @testNoDilatedConvWhenLastDimIsDynamic(%arg0: tensor<1x128x128x?xf32>, %arg1: tensor<5x5x3x8xf32>) -> tensor<1x128x128x?xf32> {
  %cst = constant dense<[2, 2]> : tensor<2xi32>
  %cst_0 = constant dense<4> : tensor<2x2xi32>
  %cst_1 = constant dense<0> : tensor<2x2xi32>
  %0 = "tf.SpaceToBatchND"(%arg0, %cst, %cst_0) : (tensor<1x128x128x?xf32>, tensor<2xi32>, tensor<2x2xi32>) -> tensor<4x68x68x?xf32>
  %1 = "tf.Conv2D"(%0, %arg1) {padding = "VALID", strides = [1, 1, 1, 1]} : (tensor<4x68x68x?xf32>, tensor<5x5x3x8xf32>) -> tensor<4x64x64x?xf32>
  %2 = "tf.BatchToSpaceND"(%1, %cst, %cst_1) : (tensor<4x64x64x?xf32>, tensor<2xi32>, tensor<2x2xi32>) -> tensor<1x128x128x?xf32>
  return %2 : tensor<1x128x128x?xf32>

  // CHECK-LABEL: testNoDilatedConvWhenLastDimIsDynamic
  // CHECK: [[STB:%.*]] = "tf.SpaceToBatchND"
  // CHECK-NEXT: [[CONV:%.*]] = "tf.Conv2D"
  // CHECK-NEXT: [[RESULT:%.*]] = "tf.BatchToSpaceND"
  // CHECK-NEXT: return [[RESULT]]
}

func @testNoDilatedConvWhenGivenInputIsNonFloatType(%arg0: tensor<1x128x128x3xi32>, %arg1: tensor<5x5x3x8xi32>) -> tensor<1x128x128x8xi32> {
  %cst = constant dense<[2, 2]> : tensor<2xi32>
  %cst_0 = constant dense<4> : tensor<2x2xi32>
  %0 = "tf.SpaceToBatchND"(%arg0, %cst, %cst_0) : (tensor<1x128x128x3xi32>, tensor<2xi32>, tensor<2x2xi32>) -> tensor<4x68x68x3xi32>
  %1 = "tf.Conv2D"(%0, %arg1) {padding = "VALID", strides = [1, 1, 1, 1]} : (tensor<4x68x68x3xi32>, tensor<5x5x3x8xi32>) -> tensor<4x64x64x8xi32>
  %2 = "tf.BatchToSpaceND"(%1, %cst, %cst_0) : (tensor<4x64x64x8xi32>, tensor<2xi32>, tensor<2x2xi32>) -> tensor<1x128x128x8xi32>
  return %2 : tensor<1x128x128x8xi32>

  // CHECK-LABEL: testNoDilatedConvWhenGivenInputIsNonFloatType
  // CHECK: [[STB:%.*]] = "tf.SpaceToBatchND"
  // CHECK-NEXT: [[CONV:%.*]] = "tf.Conv2D"
  // CHECK-NEXT: [[RESULT:%.*]] = "tf.BatchToSpaceND"
  // CHECK-NEXT: return [[RESULT]]
}
@@ -92,6 +92,15 @@ LogicalResult ConvertTFDilatedConvOp<Conv2dOpTy>::matchAndRewrite(
    return failure();
  }

  if (!TFTypeIsFloatTensor(op.input()) || !TFDataFormatIsNHWC(op))
    return failure();

  // Allow dynamic width and height dimensions only.
  auto result_ty = op.getResult().getType().template cast<TensorType>();
  if (!result_ty.hasRank() || result_ty.getRank() != 4 ||
      result_ty.isDynamicDim(0) || result_ty.isDynamicDim(3))
    return failure();

  // Check if the ConvOp is preceded by a `Expand` op and succeeded by a
  // `Squeeze` op.
  Operation* prev_op = op.getOperation()->getPrevNode();
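For illustration, a small standalone sketch of the guard above, written directly against the MLIR C++ API (EligibleForDilatedConv is a hypothetical helper, not code from this commit; the include paths and ShapedType::kDynamicSize assume an MLIR revision where that constant still exists, as it has since been renamed kDynamic). It shows that a result with dynamic height/width still qualifies for the rewrite, while a dynamic batch dimension does not:

#include <cstdint>

#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"

// Mirrors the new check: the rewrite is considered only for ranked 4-D results
// whose batch (dim 0) and channel (dim 3) sizes are static; height and width
// (dims 1 and 2) may be dynamic.
static bool EligibleForDilatedConv(mlir::TensorType type) {
  if (!type.hasRank() || type.getRank() != 4) return false;
  return !type.isDynamicDim(0) && !type.isDynamicDim(3);
}

int main() {
  mlir::MLIRContext context;
  auto f32 = mlir::FloatType::getF32(&context);
  const int64_t dyn = mlir::ShapedType::kDynamicSize;

  // tensor<1x?x?x8xf32>: only height/width are dynamic -> still eligible.
  auto hw_dynamic = mlir::RankedTensorType::get({1, dyn, dyn, 8}, f32);
  // tensor<?x128x128x8xf32>: dynamic batch -> the pattern must not fire.
  auto batch_dynamic = mlir::RankedTensorType::get({dyn, 128, 128, 8}, f32);

  return (EligibleForDilatedConv(hw_dynamic) &&
          !EligibleForDilatedConv(batch_dynamic))
             ? 0
             : 1;
}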
@@ -1314,7 +1314,7 @@ LogicalResult SpaceToBatchNDOp::inferReturnTypes(
  SmallVector<int64_t, 4> return_shape(input_rank, ShapedType::kDynamicSize);

  // The return has all dimension sizes unknown when block_rank is unknown.
-  if (block_rank == -1) {
+  if (block_rank == ShapedType::kDynamicSize) {
    inferredReturnTypes.assign(
        {RankedTensorType::get(return_shape, input_type.getElementType())});
    return success();
@@ -1333,6 +1333,14 @@ LogicalResult SpaceToBatchNDOp::inferReturnTypes(
      matchPattern(paddings_val, m_Constant(&paddings_attr))) {
    int64_t return_batch = input_shape[0];
    for (uint64_t i = 0; i < block_rank; ++i) {
      // Propagate dynamic dimension.
      if (input_shape[i + 1] == ShapedType::kDynamicSize) {
        return_batch = ShapedType::kDynamicSize;
      }
      if (return_batch == ShapedType::kDynamicSize) {
        return_shape[1 + i] = ShapedType::kDynamicSize;
        continue;
      }
      int64_t paddings_sum =
          paddings_attr.getValue({i, 0}).cast<IntegerAttr>().getInt() +
          paddings_attr.getValue({i, 1}).cast<IntegerAttr>().getInt();
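As a standalone illustration of the propagation rule above, here is a minimal sketch in plain C++ (not the TensorFlow implementation: InferSpaceToBatchShape is a hypothetical helper, -1 stands in for ShapedType::kDynamicSize, and the static arithmetic is the usual SpaceToBatchND rule described earlier):

#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

constexpr int64_t kDyn = -1;  // stand-in for ShapedType::kDynamicSize

// A dynamic batch or a dynamic spatial input dimension makes the inferred
// batch dynamic, and once the batch is dynamic every remaining spatial output
// dimension is left dynamic as well. Static dimensions use the usual
// (input + pad_begin + pad_end) / block rule, and the batch is multiplied by
// the block sizes.
std::vector<int64_t> InferSpaceToBatchShape(
    const std::vector<int64_t>& in, const std::vector<int64_t>& block,
    const std::vector<std::pair<int64_t, int64_t>>& pads) {
  std::vector<int64_t> out(in.size(), kDyn);
  int64_t batch = in[0];
  for (size_t i = 0; i < block.size(); ++i) {
    if (in[i + 1] == kDyn) batch = kDyn;  // propagate the dynamic dimension
    if (batch == kDyn) {
      out[i + 1] = kDyn;
      continue;
    }
    out[i + 1] = (in[i + 1] + pads[i].first + pads[i].second) / block[i];
    batch *= block[i];
  }
  out[0] = batch;
  // Remaining dimensions (e.g. channels) pass through unchanged.
  for (size_t i = block.size() + 1; i < in.size(); ++i) out[i] = in[i];
  return out;
}

int main() {
  // tensor<1x128x?x3> with block [2, 2] and zero paddings: the first spatial
  // dimension is still inferred as 64, the second stays dynamic, and the
  // batch becomes dynamic. Prints: -1 64 -1 3
  for (int64_t d : InferSpaceToBatchShape({1, 128, kDyn, 3}, {2, 2},
                                          {{0, 0}, {0, 0}}))
    std::cout << d << " ";
  std::cout << "\n";
  return 0;
}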