diff --git a/docs/spec.md b/docs/spec.md
index f9c50cd464d..31fbf79f5a3 100644
--- a/docs/spec.md
+++ b/docs/spec.md
@@ -3057,7 +3057,7 @@ op, but the result shape is specified dynamically via `output_shape`.
 
 This operation is functionally identical to
 [pad](https://github.com/openxla/stablehlo/blob/main/docs/spec.md#pad)
-op, but with `edge_padding_low`, `edge_padding_high` and `interior_padding`
+op, but with `edge_padding_low`, `edge_padding_high`, and `interior_padding`
 specified dynamically as values.
 
 #### Inputs
@@ -3099,7 +3099,7 @@ specified dynamically as values.
 // %interior_padding: [1, 2]
 %result = "stablehlo.dynamic_pad"(%operand, %padding_value,
   %edge_padding_low, %edge_padding_high, %interior_padding
-) : (tensor<2x3xi32>, tensor<i32>, tensor<2xi32>, tensor<2xi32>, tensor<2xi32>) -> tensor<5x9xi32>
+) : (tensor<2x3xi64>, tensor<i64>, tensor<2xi64>, tensor<2xi64>, tensor<2xi64>) -> tensor<5x9xi64>
 // %result: [
 //          [0, 1, 0, 0, 2, 0, 0, 3, 0],
 //          [0, 0, 0, 0, 0, 0, 0, 0, 0],
diff --git a/stablehlo/dialect/StablehloOps.td b/stablehlo/dialect/StablehloOps.td
index adc83c16de3..845a636b220 100644
--- a/stablehlo/dialect/StablehloOps.td
+++ b/stablehlo/dialect/StablehloOps.td
@@ -3434,7 +3434,7 @@ def StableHLO_DynamicPadOp: StableHLO_ShapedInterfaceOp<"dynamic_pad",
     %interior_padding = stablehlo.constant dense<[1, 2]> : tensor<2xi32>
     %result = stablehlo.dynamic_pad %operand, %padding_value,
                 %edge_padding_low, %edge_padding_high, %interior_padding
-                : (tensor<2x3xi32>, tensor<i32>, tensor<2xi32>, tensor<2xi32>, tensor<2xi32>) -> tensor<5x9xi32>
+                : (tensor<2x3xi64>, tensor<i64>, tensor<2xi64>, tensor<2xi64>, tensor<2xi64>) -> tensor<5x9xi64>
     ```
   }];
   let arguments = (ins
diff --git a/stablehlo/dialect/TypeInference.cpp b/stablehlo/dialect/TypeInference.cpp
index a873748835c..603b5d2558c 100644
--- a/stablehlo/dialect/TypeInference.cpp
+++ b/stablehlo/dialect/TypeInference.cpp
@@ -4085,16 +4085,14 @@ LogicalResult verifyDynamicPadOp(std::optional<Location> location,
   auto inputType = cast<RankedTensorType>(operand.getType());
   int inputRank = inputType.getRank();
 
-  /*dynamic_pad_c2*/
-  // edgePaddingLow, edgePaddingHigh and interiorPadding are enforced to have
-  // the same size by ODS
+  // dynamic_pad_c2
   auto paddingLowType = cast<RankedTensorType>(edgePaddingLow.getType());
   auto paddingSize = paddingLowType.getDimSize(0);
   if (paddingSize != inputRank)
     return emitOptionalError(location, "padding operands size (", paddingSize,
                              ") must match operand rank (", inputRank, ")");
 
-  /*dynamic_pad_c3*/
+  // dynamic_pad_c3
   SmallVector<int64_t> interiorPaddingValues;
   auto interiorPaddingMatched =
       matchInts(interiorPadding, interiorPaddingValues);
@@ -4112,11 +4110,12 @@ LogicalResult verifyDynamicPadOp(std::optional<Location> location,
   SmallVector<int64_t> edgePaddingLowValues;
   if (failed(matchInts(edgePaddingLow, edgePaddingLowValues)))
     return success();
+
   SmallVector<int64_t> edgePaddingHighValues;
   if (failed(matchInts(edgePaddingHigh, edgePaddingHighValues)))
     return success();
 
-  /*dynamic_pad_c4*/
+  // dynamic_pad_c4
   for (auto [i, in, out, low, high, interior] : llvm::enumerate(
            inputType.getShape(), outputType.getShape(), edgePaddingLowValues,
            edgePaddingHighValues, interiorPaddingValues)) {
diff --git a/stablehlo/tests/ops_stablehlo.mlir b/stablehlo/tests/ops_stablehlo.mlir
index 9798f340afc..1928f3332ee 100644
--- a/stablehlo/tests/ops_stablehlo.mlir
+++ b/stablehlo/tests/ops_stablehlo.mlir
@@ -3229,6 +3229,52 @@ func.func @dot_general_one_element_precision_config(%arg0: tensor<2x3x4xf32>, %a
 // -----
 
+func.func @dynamic_pad(
+  %arg: tensor<4xf64>, %padding_value: tensor<f64>,
+  %padding_low: tensor<1xi32>, %padding_high: tensor<1xi32>, %interior_padding: tensor<1xi32>
+) {
+  %0 = stablehlo.dynamic_pad %arg, %padding_value, %padding_low, %padding_high, %interior_padding
+    : (tensor<4xf64>, tensor<f64>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>) -> tensor<?xf64>
+  func.return
+}
+
+// -----
+
+func.func @dynamic_pad_c2(
+  %arg: tensor<4xf64>, %padding_value: tensor<f64>,
+  %padding_low: tensor<2xi32>, %padding_high: tensor<2xi32>, %interior_padding: tensor<2xi32>
+) {
+  // @expected-error@+1 {{padding operands size (2) must match operand rank (1)}}
+  %0 = stablehlo.dynamic_pad %arg, %padding_value, %padding_low, %padding_high, %interior_padding
+    : (tensor<4xf64>, tensor<f64>, tensor<2xi32>, tensor<2xi32>, tensor<2xi32>) -> tensor<?xf64>
+  func.return
+}
+
+// -----
+
+func.func @dynamic_pad_c3(
+  %arg: tensor<4xf64>, %padding_value: tensor<f64>,
+  %padding_low: tensor<1xi32>, %padding_high: tensor<1xi32>
+) {
+  %interior_padding = stablehlo.constant dense<-1> : tensor<1xi32>
+  // @expected-error@+1 {{interior_padding must be non-negative, but got -1}}
+  %0 = stablehlo.dynamic_pad %arg, %padding_value, %padding_low, %padding_high, %interior_padding
+    : (tensor<4xf64>, tensor<f64>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>) -> tensor<?xf64>
+  func.return
+}
+
+// -----
+
+func.func @dynamic_pad_c4(%arg: tensor<4xf64>, %padding_value: tensor<f64>) {
+  %padding = stablehlo.constant dense<1> : tensor<1xi32>
+  // @expected-error@+1 {{expected output dimension at index 0 to equal 9, but got 4}}
+  %0 = stablehlo.dynamic_pad %arg, %padding_value, %padding, %padding, %padding
+    : (tensor<4xf64>, tensor<f64>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>) -> tensor<4xf64>
+  func.return
+}
+
+// -----
+
 func.func @dynamic_reshape(%arg0: tensor<?xf32>, %shape: tensor<2xindex>) -> tensor<?x?xf32> {
   %0 = "stablehlo.dynamic_reshape"(%arg0, %shape) : (tensor<?xf32>, tensor<2xindex>) -> tensor<?x?xf32>
   func.return %0 : tensor<?x?xf32>
 }
@@ -5899,49 +5945,3 @@ func.func @composite_c4(%arg0: !stablehlo.token) {
   } : (!stablehlo.token) -> tensor<f32>
   func.return
 }
-
-// -----
-
-func.func @dynamic_pad(
-  %arg: tensor<4xf64>, %padding_value: tensor<f64>,
-  %padding_low: tensor<1xi32>, %padding_high: tensor<1xi32>, %interior_padding: tensor<1xi32>
-) {
-  %0 = stablehlo.dynamic_pad %arg, %padding_value, %padding_low, %padding_high, %interior_padding
-    : (tensor<4xf64>, tensor<f64>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>) -> tensor<?xf64>
-  func.return
-}
-
-// -----
-
-func.func @dynamic_pad_c2(
-  %arg: tensor<4xf64>, %padding_value: tensor<f64>,
-  %padding_low: tensor<2xi32>, %padding_high: tensor<2xi32>, %interior_padding: tensor<2xi32>
-) {
-  // @expected-error@+1 {{padding operands size (2) must match operand rank (1)}}
-  %0 = stablehlo.dynamic_pad %arg, %padding_value, %padding_low, %padding_high, %interior_padding
-    : (tensor<4xf64>, tensor<f64>, tensor<2xi32>, tensor<2xi32>, tensor<2xi32>) -> tensor<?xf64>
-  func.return
-}
-
-// -----
-
-func.func @dynamic_pad_c3(
-  %arg: tensor<4xf64>, %padding_value: tensor<f64>,
-  %padding_low: tensor<1xi32>, %padding_high: tensor<1xi32>
-) {
-  %interior_padding = stablehlo.constant dense<-1> : tensor<1xi32>
-  // @expected-error@+1 {{interior_padding must be non-negative, but got -1}}
-  %0 = stablehlo.dynamic_pad %arg, %padding_value, %padding_low, %padding_high, %interior_padding
-    : (tensor<4xf64>, tensor<f64>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>) -> tensor<?xf64>
-  func.return
-}
-
-// -----
-
-func.func @dynamic_pad_c4(%arg: tensor<4xf64>, %padding_value: tensor<f64>) {
-  %padding = stablehlo.constant dense<1> : tensor<1xi32>
-  // @expected-error@+1 {{expected output dimension at index 0 to equal 9, but got 4}}
-  %0 = stablehlo.dynamic_pad %arg, %padding_value, %padding, %padding, %padding
-    : (tensor<4xf64>, tensor<f64>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>) -> tensor<4xf64>
-  func.return
-}
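
For reference, the output-shape rule that the new `dynamic_pad_c4` check enforces is the one the spec already gives for the static `pad` op: `shape(result)[d] = shape(operand)[d] + edge_padding_low[d] + max(shape(operand)[d] - 1, 0) * interior_padding[d] + edge_padding_high[d]`. The standalone C++ sketch below (illustrative only; `expectedPaddedDim` is a hypothetical helper, not code from this change) reproduces the numbers quoted above:

```c++
#include <algorithm>
#include <cstdint>
#include <cstdio>

// Per-dimension result size of pad/dynamic_pad, as checked by dynamic_pad_c4:
//   out[d] = in[d] + low[d] + max(in[d] - 1, 0) * interior[d] + high[d]
int64_t expectedPaddedDim(int64_t in, int64_t low, int64_t high,
                          int64_t interior) {
  return in + low + std::max<int64_t>(in - 1, 0) * interior + high;
}

int main() {
  // The @dynamic_pad_c4 test: in = 4, low = high = interior = 1, so the
  // verifier expects 4 + 1 + 3 * 1 + 1 = 9, not the declared 4.
  std::printf("%lld\n", static_cast<long long>(expectedPaddedDim(4, 1, 1, 1)));

  // The spec example: operand tensor<2x3xi64>, low [0, 1], high [2, 1],
  // interior [1, 2] gives the tensor<5x9xi64> result type.
  std::printf("%lld x %lld\n",
              static_cast<long long>(expectedPaddedDim(2, 0, 2, 1)),
              static_cast<long long>(expectedPaddedDim(3, 1, 1, 2)));
  return 0;
}
```

This prints `9` and `5 x 9`, matching the `expected output dimension at index 0 to equal 9, but got 4` diagnostic in the negative test and the `tensor<5x9xi64>` result type in the spec example.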