Fix for ttir.arange decomposition (#1564)
* The ArrayRef was constructed from a temporary array (an rvalue) that is no longer alive after the initializing statement, which the address sanitizer reports as a stack-use-after-scope error.
mmanzoorTT authored Dec 12, 2024
1 parent f22c416 commit 0ce883a
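
For context, a minimal, self-contained sketch of the failure mode and the fix (this is not the repository code; the function and variable names are invented for illustration). llvm::ArrayRef is a non-owning view, so it must not be bound to a temporary:

#include <cstdint>

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"

// Illustrative only -- names do not come from the tt-mlir sources.
bool shapeMatches(int64_t arangeLength, llvm::ArrayRef<int64_t> outputShape) {
  // Buggy pattern: the braced list creates a temporary backing array that
  // dies at the end of this statement, so any later read through `dangling`
  // is a stack-use-after-scope.
  llvm::ArrayRef<int64_t> dangling = {1, 1, 1, arangeLength};
  (void)dangling; // comparing it against outputShape here would be UB

  // Fixed pattern: keep the elements in named storage that outlives the
  // view, then wrap that storage in an ArrayRef.
  const llvm::SmallVector<int64_t, 4> requiredShape{1, 1, 1, arangeLength};
  llvm::ArrayRef<int64_t> view(requiredShape);
  return view == outputShape; // reads live memory owned by requiredShape
}

The second hunk below applies the same idea: requiredShape owns the data, and both the shape comparison and the RankedTensorType::get call take a view of it.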
Showing 2 changed files with 23 additions and 4 deletions.
@@ -266,7 +266,7 @@ struct ConvolutionDecompositionPattern
   }
 };
 
-// A decompostion pattern that matches to a ttir.convolution op that does 1D
+// A decomposition pattern that matches to a ttir.convolution op that does 1D
 // convolution. Since that is not supported in ttnn, we reshape the inputs and
 // the output to match a 2D ttir.convolution op. The expectation is that the new
 // ttir.convolution op will be picked up by the ConvolutionToConv2dPattern and
@@ -1080,14 +1080,14 @@ struct ArangeForceLastDimensionPattern
 
     int64_t arangeLength = (end - start) / step;
 
-    ArrayRef<int64_t> ttnnShape = {1, 1, 1, arangeLength};
+    const llvm::SmallVector<int64_t, 4> requiredShape{1, 1, 1, arangeLength};
+    ArrayRef<int64_t> ttnnShape(requiredShape);
     if (ttnnShape == outputType.getShape()) {
       return success();
     }
 
     RankedTensorType arangeOutputType = RankedTensorType::get(
-        SmallVector<int64_t>({1, 1, 1, arangeLength}),
-        outputType.getElementType(), outputType.getEncoding());
+        requiredShape, outputType.getElementType(), outputType.getEncoding());
 
     Value output =
         rewriter
19 changes: 19 additions & 0 deletions test/ttmlir/Decomposition/TTIR/arange/arange_tests_positive.mlir
@@ -0,0 +1,19 @@
+// RUN: ttmlir-opt --ttir-to-ttir-decomposition %s | FileCheck %s
+#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
+module attributes {} {
+  func.func @forward(%arg0: tensor<1x32x128x128xf32>) -> tensor<1x32x128x128xf32> {
+    // CHECK: %[[ARANGE:[0-9]+]] = "ttir.arange"
+    // CHECK-SAME: {arange_dimension = 3 : i64, end = 32 : si64, start = 0 : si64, step = 1 : si64}
+    // CHECK-SAME: -> tensor<1x1x1x32xf32>
+    // CHECK: %[[TRANSPOSE:[0-9]+]] = "ttir.transpose"(%[[ARANGE]],
+    // CHECK-SAME: {dim0 = 1 : si32, dim1 = 3 : si32,
+    // CHECK-SAME: (tensor<1x1x1x32xf32>, tensor<1x32x1x1xf32>) -> tensor<1x32x1x1xf32>
+    // CHECK: %[[BROADCAST:[0-9]+]] = "ttir.broadcast"(%[[TRANSPOSE]],
+    // CHECK-SAME: {dimension = [2, 3]
+    // CHECK-SAME: (tensor<1x32x1x1xf32>, tensor<1x32x128x128xf32>) -> tensor<1x32x128x128xf32>
+    %1 = "ttir.arange"() <{start = 0: si64, end = 32: si64, step = 1: si64, arange_dimension = 1: i64}> : () -> tensor<1x32x128x128xf32>
+    %dps = tensor.empty() : tensor<1x32x128x128xf32>
+    %2 = "ttir.multiply"(%arg0, %1, %dps) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<1x32x128x128xf32>, tensor<1x32x128x128xf32>, tensor<1x32x128x128xf32>) -> tensor<1x32x128x128xf32>
+    return %2 : tensor<1x32x128x128xf32>
+  }
+}
