Address post-submit comments on openxla#2306 (openxla#2324)
Thanks for taking the time to review the code!

FYI:
* I didn't quite understand this comment:
openxla#2306 (comment)
* I didn't implement this suggestion (I think the existing code is
correct):
openxla#2306 (comment)
mlevesquedion authored May 11, 2024
1 parent 306b7e9 commit 532a0ef
Showing 4 changed files with 53 additions and 54 deletions.
4 changes: 2 additions & 2 deletions docs/spec.md
@@ -3057,7 +3057,7 @@ op, but the result shape is specified dynamically via `output_shape`.

This operation is functionally identical to
[pad](https://github.com/openxla/stablehlo/blob/main/docs/spec.md#pad)
-op, but with `edge_padding_low`, `edge_padding_high` and `interior_padding`
+op, but with `edge_padding_low`, `edge_padding_high`, and `interior_padding`
specified dynamically as values.

#### Inputs
@@ -3099,7 +3099,7 @@ specified dynamically as values.
// %interior_padding: [1, 2]
%result = "stablehlo.dynamic_pad"(%operand, %padding_value,
%edge_padding_low, %edge_padding_high, %interior_padding
-) : (tensor<2x3xi32>, tensor<i32>, tensor<2xi32>, tensor<2xi32>, tensor<2xi32>) -> tensor<5x9xi32>
+) : (tensor<2x3xi64>, tensor<i64>, tensor<2xi64>, tensor<2xi64>, tensor<2xi64>) -> tensor<5x9xi64>
// %result: [
// [0, 1, 0, 0, 2, 0, 0, 3, 0],
// [0, 0, 0, 0, 0, 0, 0, 0, 0],
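For context on the spec example above: under `pad`/`dynamic_pad` semantics, each result dimension equals the operand dimension plus the low and high edge padding, plus one run of interior padding between each pair of adjacent elements. A minimal C++ sketch of that arithmetic (the helper name is illustrative, not part of the StableHLO sources):

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>

// Illustrative helper (not from the StableHLO codebase): expected result
// extent of one dimension under pad/dynamic_pad semantics.
int64_t paddedDimSize(int64_t in, int64_t low, int64_t high, int64_t interior) {
  // Interior padding goes between adjacent elements, so it contributes
  // (in - 1) runs when the dimension is non-empty.
  return in + low + high + std::max<int64_t>(in - 1, 0) * interior;
}

int main() {
  // The spec example: operand tensor<2x3xi64>, edge_padding_low = [0, 1],
  // edge_padding_high = [2, 1], interior_padding = [1, 2].
  std::cout << paddedDimSize(2, 0, 2, 1) << '\n';  // 5
  std::cout << paddedDimSize(3, 1, 1, 2) << '\n';  // 9 -> tensor<5x9xi64>
}
```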
2 changes: 1 addition & 1 deletion stablehlo/dialect/StablehloOps.td
@@ -3434,7 +3434,7 @@ def StableHLO_DynamicPadOp: StableHLO_ShapedInterfaceOp<"dynamic_pad",
%interior_padding = stablehlo.constant dense<[1, 2]> : tensor<2xi32>
%result = stablehlo.dynamic_pad %operand, %padding_value,
%edge_padding_low, %edge_padding_high, %interior_padding
-    : (tensor<2x3xi32>, tensor<i32>, tensor<2xi32>, tensor<2xi32>, tensor<2xi32>) -> tensor<5x9xi32>
+    : (tensor<2x3xi64>, tensor<i64>, tensor<2xi64>, tensor<2xi64>, tensor<2xi64>) -> tensor<5x9xi64>
```
}];
let arguments = (ins
9 changes: 4 additions & 5 deletions stablehlo/dialect/TypeInference.cpp
@@ -4085,16 +4085,14 @@ LogicalResult verifyDynamicPadOp(std::optional<Location> location,
auto inputType = cast<RankedTensorType>(operand.getType());
int inputRank = inputType.getRank();

-/*dynamic_pad_c2*/
-// edgePaddingLow, edgePaddingHigh and interiorPadding are enforced to have
-// the same size by ODS
+// dynamic_pad_c2
auto paddingLowType = cast<RankedTensorType>(edgePaddingLow.getType());
auto paddingSize = paddingLowType.getDimSize(0);
if (paddingSize != inputRank)
return emitOptionalError(location, "padding operands size (", paddingSize,
") must match operand rank (", inputRank, ")");

-/*dynamic_pad_c3*/
+// dynamic_pad_c3
SmallVector<int64_t> interiorPaddingValues;
auto interiorPaddingMatched =
matchInts(interiorPadding, interiorPaddingValues);
@@ -4112,11 +4110,12 @@ LogicalResult verifyDynamicPadOp(std::optional<Location> location,

SmallVector<int64_t> edgePaddingLowValues;
if (failed(matchInts(edgePaddingLow, edgePaddingLowValues))) return success();

SmallVector<int64_t> edgePaddingHighValues;
if (failed(matchInts(edgePaddingHigh, edgePaddingHighValues)))
return success();

-/*dynamic_pad_c4*/
+// dynamic_pad_c4
for (auto [i, in, out, low, high, interior] : llvm::enumerate(
inputType.getShape(), outputType.getShape(), edgePaddingLowValues,
edgePaddingHighValues, interiorPaddingValues)) {
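Two details in this hunk may not be obvious from the diff alone: `matchInts` succeeds only when a padding operand folds to compile-time constants, so each early `return success()` means "the values are dynamic, nothing to verify here"; the c4 loop then checks each output dimension against the padded extent. A self-contained sketch of that control flow, with simplified types standing in for the actual MLIR helpers (and no handling of dynamic output dimensions):

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>

// Sketch only: std::optional stands in for matchInts, which fails when an
// operand's values are not statically known.
using MaybeInts = std::optional<std::vector<int64_t>>;

bool verifyDynamicPadSketch(const std::vector<int64_t>& inShape,
                            const std::vector<int64_t>& outShape,
                            const MaybeInts& low, const MaybeInts& high,
                            const MaybeInts& interior) {
  // dynamic_pad_c3: interior padding must be non-negative when known.
  if (!interior) return true;  // dynamic values: conservatively accept
  for (int64_t v : *interior)
    if (v < 0) return false;

  // Mirrors the early `return success()` calls in the hunk above.
  if (!low || !high) return true;

  // dynamic_pad_c4: every output dimension must equal the padded extent.
  for (std::size_t i = 0; i < inShape.size(); ++i) {
    int64_t expected = inShape[i] + (*low)[i] + (*high)[i] +
                       std::max<int64_t>(inShape[i] - 1, 0) * (*interior)[i];
    if (outShape[i] != expected) return false;
  }
  return true;
}
```

For the `dynamic_pad_c4` test below, this yields 4 + 1 + 1 + 3*1 = 9, matching the "expected output dimension at index 0 to equal 9, but got 4" diagnostic.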
92 changes: 46 additions & 46 deletions stablehlo/tests/ops_stablehlo.mlir
@@ -3229,6 +3229,52 @@ func.func @dot_general_one_element_precision_config(%arg0: tensor<2x3x4xf32>, %a

// -----

+func.func @dynamic_pad(
+  %arg: tensor<4xf64>, %padding_value: tensor<f64>,
+  %padding_low: tensor<1xi32>, %padding_high: tensor<1xi32>, %interior_padding: tensor<1xi32>
+) {
+  %0 = stablehlo.dynamic_pad %arg, %padding_value, %padding_low, %padding_high, %interior_padding
+    : (tensor<4xf64>, tensor<f64>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>) -> tensor<?xf64>
+  func.return
+}
+
+// -----
+
+func.func @dynamic_pad_c2(
+  %arg: tensor<4xf64>, %padding_value: tensor<f64>,
+  %padding_low: tensor<2xi32>, %padding_high: tensor<2xi32>, %interior_padding: tensor<2xi32>
+) {
+  // @expected-error@+1 {{padding operands size (2) must match operand rank (1)}}
+  %0 = stablehlo.dynamic_pad %arg, %padding_value, %padding_low, %padding_high, %interior_padding
+    : (tensor<4xf64>, tensor<f64>, tensor<2xi32>, tensor<2xi32>, tensor<2xi32>) -> tensor<?xf64>
+  func.return
+}
+
+// -----
+
+func.func @dynamic_pad_c3(
+  %arg: tensor<4xf64>, %padding_value: tensor<f64>,
+  %padding_low: tensor<1xi32>, %padding_high: tensor<1xi32>
+) {
+  %interior_padding = stablehlo.constant dense<-1> : tensor<1xi32>
+  // @expected-error@+1 {{interior_padding must be non-negative, but got -1}}
+  %0 = stablehlo.dynamic_pad %arg, %padding_value, %padding_low, %padding_high, %interior_padding
+    : (tensor<4xf64>, tensor<f64>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>) -> tensor<?xf64>
+  func.return
+}
+
+// -----
+
+func.func @dynamic_pad_c4(%arg: tensor<4xf64>, %padding_value: tensor<f64>) {
+  %padding = stablehlo.constant dense<1> : tensor<1xi32>
+  // @expected-error@+1 {{expected output dimension at index 0 to equal 9, but got 4}}
+  %0 = stablehlo.dynamic_pad %arg, %padding_value, %padding, %padding, %padding
+    : (tensor<4xf64>, tensor<f64>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>) -> tensor<4xf64>
+  func.return
+}
+
+// -----
+
func.func @dynamic_reshape(%arg0: tensor<?xf32>, %shape: tensor<2xindex>) -> tensor<?x?xf32> {
%0 = "stablehlo.dynamic_reshape"(%arg0, %shape) : (tensor<?xf32>, tensor<2xindex>) -> tensor<?x?xf32>
func.return %0 : tensor<?x?xf32>
@@ -5899,49 +5945,3 @@ func.func @composite_c4(%arg0: !stablehlo.token) {
} : (!stablehlo.token) -> tensor<f32>
func.return
}

-// -----
-
-func.func @dynamic_pad(
-  %arg: tensor<4xf64>, %padding_value: tensor<f64>,
-  %padding_low: tensor<1xi32>, %padding_high: tensor<1xi32>, %interior_padding: tensor<1xi32>
-) {
-  %0 = stablehlo.dynamic_pad %arg, %padding_value, %padding_low, %padding_high, %interior_padding
-    : (tensor<4xf64>, tensor<f64>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>) -> tensor<?xf64>
-  func.return
-}
-
-// -----
-
-func.func @dynamic_pad_c2(
-  %arg: tensor<4xf64>, %padding_value: tensor<f64>,
-  %padding_low: tensor<2xi32>, %padding_high: tensor<2xi32>, %interior_padding: tensor<2xi32>
-) {
-  // @expected-error@+1 {{padding operands size (2) must match operand rank (1)}}
-  %0 = stablehlo.dynamic_pad %arg, %padding_value, %padding_low, %padding_high, %interior_padding
-    : (tensor<4xf64>, tensor<f64>, tensor<2xi32>, tensor<2xi32>, tensor<2xi32>) -> tensor<?xf64>
-  func.return
-}
-
-// -----
-
-func.func @dynamic_pad_c3(
-  %arg: tensor<4xf64>, %padding_value: tensor<f64>,
-  %padding_low: tensor<1xi32>, %padding_high: tensor<1xi32>
-) {
-  %interior_padding = stablehlo.constant dense<-1> : tensor<1xi32>
-  // @expected-error@+1 {{interior_padding must be non-negative, but got -1}}
-  %0 = stablehlo.dynamic_pad %arg, %padding_value, %padding_low, %padding_high, %interior_padding
-    : (tensor<4xf64>, tensor<f64>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>) -> tensor<?xf64>
-  func.return
-}
-
-// -----
-
-func.func @dynamic_pad_c4(%arg: tensor<4xf64>, %padding_value: tensor<f64>) {
-  %padding = stablehlo.constant dense<1> : tensor<1xi32>
-  // @expected-error@+1 {{expected output dimension at index 0 to equal 9, but got 4}}
-  %0 = stablehlo.dynamic_pad %arg, %padding_value, %padding, %padding, %padding
-    : (tensor<4xf64>, tensor<f64>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>) -> tensor<4xf64>
-  func.return
-}
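(As with the rest of ops_stablehlo.mlir, these `// -----`-separated cases and their `@expected-error` annotations are presumably exercised via lit in the standard MLIR way, along the lines of `stablehlo-opt -split-input-file -verify-diagnostics ops_stablehlo.mlir`.)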
