Renamed params in transpose op (#335)
* Renamed transpose operands to dim0 and dim1, to match Forge.

* Updated tensor sizes in tests to match allowed buffer sizes.
vladimirjovanovicTT authored Aug 19, 2024
1 parent a79d106 commit 8021450
Showing 12 changed files with 53 additions and 55 deletions.
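At the TTIR level the change is a pure attribute rename; abridged from the test updates below, a transpose previously written as

    %1 = "ttir.transpose"(%arg0, %0) <{dimension1 = 0 : si32, dimension2 = 1 : si32, ...}>

now reads

    %1 = "ttir.transpose"(%arg0, %0) <{dim0 = 0 : si32, dim1 = 1 : si32, ...}>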
4 changes: 2 additions & 2 deletions include/ttmlir/Dialect/TTIR/IR/TTIROps.td
@@ -264,8 +264,8 @@ def TTIR_TransposeOp : TTIR_DPSOp<"transpose"> {

  let arguments = (ins AnyRankedTensor:$input,
                       AnyRankedTensor:$output,
-                      SI32Attr:$dimension1,
-                      SI32Attr:$dimension2,
+                      SI32Attr:$dim0,
+                      SI32Attr:$dim1,
                       TT_OperandConstraintArrayAttr:$operand_constraints);

  let results = (outs AnyRankedTensor:$result);
4 changes: 2 additions & 2 deletions include/ttmlir/Dialect/TTNN/IR/TTNNOps.td
@@ -165,8 +165,8 @@ def TTNN_TransposeOp : TTNN_NamedDPSOp<"transpose"> {

  let arguments = (ins AnyRankedTensor:$input,
                       AnyRankedTensor:$output,
-                      SI32Attr:$dimension1,
-                      SI32Attr:$dimension2);
+                      SI32Attr:$dim0,
+                      SI32Attr:$dim1);

  let results = (outs AnyRankedTensor:$result);

4 changes: 2 additions & 2 deletions include/ttmlir/Target/TTNN/program.fbs
@@ -65,8 +65,8 @@ table SoftmaxOp {
table TransposeOp {
  in: tt.target.TensorRef;
  out: tt.target.TensorRef;
-  dimension1: int32;
-  dimension2: int32;
+  dim0: int32;
+  dim1: int32;
}

// ANCHOR: adding_an_op_matmul_fbs
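FlatBuffers derives the generated C++ accessor names from the schema field names, so the rename propagates to generated code automatically; as the runtime change below shows, consumers now read

    int32_t dim0 = op->dim0(); // previously op->dimension1()
    int32_t dim1 = op->dim1(); // previously op->dimension2()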
4 changes: 2 additions & 2 deletions lib/Conversion/TTIRToTTNN/TTIRToTTNN.cpp
@@ -120,8 +120,8 @@ class TransposeOpConversionPattern
                  ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<ttnn::TransposeOp>(
        op, this->getTypeConverter()->convertType(op.getType()),
-       adaptor.getInput(), adaptor.getOutput(), adaptor.getDimension1(),
-       adaptor.getDimension2());
+       adaptor.getInput(), adaptor.getOutput(), adaptor.getDim0(),
+       adaptor.getDim1());
    return success();
  }
};
20 changes: 10 additions & 10 deletions lib/Dialect/TTIR/IR/TTIROps.cpp
@@ -73,30 +73,30 @@ ::mlir::LogicalResult mlir::tt::ttir::TransposeOp::verify() {
  ::mlir::RankedTensorType outputType = getOutput().getType();
  auto inputShape = inputType.getShape();
  auto outputShape = outputType.getShape();
-  int32_t dim1 = getDimension1();
-  int32_t dim2 = getDimension2();
+  int32_t dim0 = getDim0();
+  int32_t dim1 = getDim1();
  if (inputType.getRank() < 2) {
    return emitOpError("Input must be at least a 2D tensor");
  }
  if (inputType.getRank() != outputType.getRank()) {
    return emitOpError("Input must have the same rank as output");
  }
+  if (dim0 >= inputType.getRank() || dim0 < -inputType.getRank()) {
+    return emitOpError(
+        "Dimension 0 attribute must be within the bounds of the input tensor");
+  }
  if (dim1 >= inputType.getRank() || dim1 < -inputType.getRank()) {
    return emitOpError(
        "Dimension 1 attribute must be within the bounds of the input tensor");
  }
-  if (dim2 >= inputType.getRank() || dim2 < -inputType.getRank()) {
-    return emitOpError(
-        "Dimension 2 attribute must be within the bounds of the input tensor");
+  if (dim0 < 0) {
+    dim0 += inputType.getRank();
  }
  if (dim1 < 0) {
    dim1 += inputType.getRank();
  }
-  if (dim2 < 0) {
-    dim2 += inputType.getRank();
-  }
-  if (outputShape[dim1] != inputShape[dim2] ||
-      outputShape[dim2] != inputShape[dim1]) {
+  if (outputShape[dim0] != inputShape[dim1] ||
+      outputShape[dim1] != inputShape[dim0]) {
    return emitOpError("Input-output transpose dimension mismatch.");
  }
  return success();
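For context, the verifier's negative-index handling follows the usual convention that a negative dim counts from the back of the shape. A minimal standalone sketch of the same normalization (hypothetical helper, not part of this commit):

    #include <cstdint>

    // Maps a possibly-negative dim into [0, rank); assumes the bounds check
    // -rank <= dim < rank has already passed, as the verifier enforces above.
    int32_t normalizeDim(int32_t dim, int64_t rank) {
      return dim < 0 ? dim + static_cast<int32_t>(rank) : dim;
    }
    // e.g. normalizeDim(-1, 4) == 3 and normalizeDim(2, 4) == 2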
20 changes: 10 additions & 10 deletions lib/Dialect/TTNN/IR/TTNNOps.cpp
@@ -55,30 +55,30 @@ ::mlir::LogicalResult mlir::tt::ttnn::TransposeOp::verify() {
  ::mlir::RankedTensorType outputType = getOutput().getType();
  auto inputShape = inputType.getShape();
  auto outputShape = outputType.getShape();
-  int32_t dim1 = getDimension1();
-  int32_t dim2 = getDimension2();
+  int32_t dim0 = getDim0();
+  int32_t dim1 = getDim1();
  if (inputType.getRank() < 2) {
    return emitOpError("Input must be at least a 2D tensor");
  }
  if (inputType.getRank() != outputType.getRank()) {
    return emitOpError("Input must have the same rank as output");
  }
+  if (dim0 >= inputType.getRank() || dim0 < -inputType.getRank()) {
+    return emitOpError(
+        "Dimension 0 attribute must be within the bounds of the input tensor");
+  }
  if (dim1 >= inputType.getRank() || dim1 < -inputType.getRank()) {
    return emitOpError(
        "Dimension 1 attribute must be within the bounds of the input tensor");
  }
-  if (dim2 >= inputType.getRank() || dim2 < -inputType.getRank()) {
-    return emitOpError(
-        "Dimension 2 attribute must be within the bounds of the input tensor");
+  if (dim0 < 0) {
+    dim0 += inputType.getRank();
  }
  if (dim1 < 0) {
    dim1 += inputType.getRank();
  }
-  if (dim2 < 0) {
-    dim2 += inputType.getRank();
-  }
-  if (outputShape[dim1] != inputShape[dim2] ||
-      outputShape[dim2] != inputShape[dim1]) {
+  if (outputShape[dim0] != inputShape[dim1] ||
+      outputShape[dim1] != inputShape[dim0]) {
    return emitOpError("Input-output transpose dimension mismatch.");
  }
  return success();
7 changes: 3 additions & 4 deletions lib/Target/TTNN/TTNNToFlatbuffer.cpp
@@ -178,11 +178,10 @@ createTransposeOp(FlatbufferObjectCache &cache, TransposeOp op) {
      cache.at<::tt::target::TensorRef>(getOperandThroughDPSOps(op.getInput()));
  auto out = cache.at<::tt::target::TensorRef>(
      getOperandThroughDPSOps(op.getResult()));
-  int32_t dimension1 = op.getDimension1();
-  int32_t dimension2 = op.getDimension2();
+  int32_t dim0 = op.getDim0();
+  int32_t dim1 = op.getDim1();

-  return ::tt::target::ttnn::CreateTransposeOp(*cache.fbb, in, out, dimension1,
-                                               dimension2);
+  return ::tt::target::ttnn::CreateTransposeOp(*cache.fbb, in, out, dim0, dim1);
}

template <typename SoftmaxOp>
19 changes: 9 additions & 10 deletions runtime/lib/ttnn/program.cpp
@@ -296,25 +296,24 @@ run(::tt::target::ttnn::TransposeOp const *op, ::ttnn::device::Device &device,
    std::unordered_map<std::uint32_t, ::ttnn::Tensor *> &liveTensors,
    std::list<::ttnn::Tensor> &tensorPool) {
  ::ttnn::Tensor &in = *liveTensors.at(op->in()->global_id());
-  int32_t dimension1 = op->dimension1();
-  int32_t dimension2 = op->dimension2();
+  int32_t dim0 = op->dim0();
+  int32_t dim1 = op->dim1();
  auto input_rank = in.get_shape().rank();
  // for the current version of permute, we need to work in 4D, so we add
  // leading dimensions of size 1
  std::vector<std::int64_t> dimensionOrder(4);
  std::iota(dimensionOrder.begin(), dimensionOrder.end(), 0);
-  if (dimension1 < 0) {
-    dimension1 += 4;
+  if (dim0 < 0) {
+    dim0 += 4;
  } else {
-    dimension1 = dimension1 + 4 - input_rank;
+    dim0 = dim0 + 4 - input_rank;
  }

-  if (dimension2 < 0) {
-    dimension2 += 4;
+  if (dim1 < 0) {
+    dim1 += 4;
  } else {
-    dimension2 = dimension2 + 4 - input_rank;
+    dim1 = dim1 + 4 - input_rank;
  }
-  std::swap(dimensionOrder[dimension1], dimensionOrder[dimension2]);
+  std::swap(dimensionOrder[dim0], dimensionOrder[dim1]);
  // Ideally this would use ttnn::transpose, but since ttnn::transpose doesn't
  // work at the moment, we use this temporary solution.
  auto unsqueezed_input = ::ttnn::unsqueeze_to_4D(in);
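The index arithmetic in this hunk maps the user-facing dims onto the 4D shape produced by ::ttnn::unsqueeze_to_4D: a negative dim already counts from the back of the 4D shape, while a non-negative dim has to be shifted past the prepended size-1 dimensions. A standalone sketch of that mapping (hypothetical helper, for illustration only):

    #include <cstdint>
    #include <numeric>
    #include <utility>
    #include <vector>

    // Builds the 4D permutation that swaps dim0 and dim1 of a rank-inputRank
    // tensor after it has been unsqueezed to 4D with leading 1-sized dims.
    std::vector<int64_t> transposeToPermute4D(int32_t dim0, int32_t dim1,
                                              int32_t inputRank) {
      std::vector<int64_t> order(4);
      std::iota(order.begin(), order.end(), 0); // identity order {0, 1, 2, 3}
      auto adjust = [&](int32_t d) { return d < 0 ? d + 4 : d + 4 - inputRank; };
      std::swap(order[adjust(dim0)], order[adjust(dim1)]);
      return order;
    }

    // transposeToPermute4D(0, 1, 2)   -> {0, 1, 3, 2}
    // transposeToPermute4D(-1, -2, 2) -> {0, 1, 3, 2} (same swap, negative form)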
2 changes: 1 addition & 1 deletion test/ttmlir/Dialect/TTNN/transpose/simple_transpose.mlir
@@ -4,7 +4,7 @@ module attributes {} {
  func.func @forward(%arg0: tensor<64x128xbf16>) -> tensor<128x64xbf16> {
    %0 = tensor.empty() : tensor<128x64xbf16>
    // CHECK: %[[C:.*]] = "ttnn.transpose"[[C:.*]]
-    %1 = "ttir.transpose"(%arg0, %0) <{dimension1 = 0 : si32, dimension2 = 1 : si32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<128x64xbf16>) -> tensor<128x64xbf16>
+    %1 = "ttir.transpose"(%arg0, %0) <{dim0 = 0 : si32, dim1 = 1 : si32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<64x128xbf16>, tensor<128x64xbf16>) -> tensor<128x64xbf16>
    return %1 : tensor<128x64xbf16>
  }
}
@@ -1,10 +1,10 @@
 // RUN: ttmlir-opt --ttir-load-system-desc --ttir-layout --ttnn-open-device --convert-ttir-to-ttnn %s | FileCheck %s
 #any_device_tile = #tt.operand_constraint<dram|l1|tile|any_device_tile>
 module attributes {} {
-  func.func @forward(%arg0: tensor<8x16xbf16>) -> tensor<16x8xbf16> {
-    %0 = tensor.empty() : tensor<16x8xbf16>
+  func.func @forward(%arg0: tensor<64x16xbf16>) -> tensor<16x64xbf16> {
+    %0 = tensor.empty() : tensor<16x64xbf16>
     // CHECK: %[[C:.*]] = "ttnn.transpose"[[C:.*]]
-    %1 = "ttir.transpose"(%arg0, %0) <{dimension1 = 1 : si32, dimension2 = 0 : si32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<8x16xbf16>, tensor<16x8xbf16>) -> tensor<16x8xbf16>
-    return %1 : tensor<16x8xbf16>
+    %1 = "ttir.transpose"(%arg0, %0) <{dim0 = 1 : si32, dim1 = 0 : si32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<64x16xbf16>, tensor<16x64xbf16>) -> tensor<16x64xbf16>
+    return %1 : tensor<16x64xbf16>
   }
 }
8 changes: 4 additions & 4 deletions test/ttmlir/Dialect/TTNN/transpose/simple_transpose_8x8.mlir
@@ -1,10 +1,10 @@
 // RUN: ttmlir-opt --ttir-load-system-desc --ttir-layout --ttnn-open-device --convert-ttir-to-ttnn %s | FileCheck %s
 #any_device = #tt.operand_constraint<dram|l1|tile|any_device|any_device_tile>
 module attributes {} {
-  func.func @forward(%arg0: tensor<8x8xbf16>) -> tensor<8x8xbf16> {
-    %0 = tensor.empty() : tensor<8x8xbf16>
+  func.func @forward(%arg0: tensor<32x32xbf16>) -> tensor<32x32xbf16> {
+    %0 = tensor.empty() : tensor<32x32xbf16>
     // CHECK: %[[C:.*]] = "ttnn.transpose"[[C:.*]]
-    %1 = "ttir.transpose"(%arg0, %0) <{dimension1 = 0 : si32, dimension2 = 1 : si32, operand_constraints = [#any_device, #any_device]}> : (tensor<8x8xbf16>, tensor<8x8xbf16>) -> tensor<8x8xbf16>
-    return %1 : tensor<8x8xbf16>
+    %1 = "ttir.transpose"(%arg0, %0) <{dim0 = 0 : si32, dim1 = 1 : si32, operand_constraints = [#any_device, #any_device]}> : (tensor<32x32xbf16>, tensor<32x32xbf16>) -> tensor<32x32xbf16>
+    return %1 : tensor<32x32xbf16>
   }
 }
@@ -1,10 +1,10 @@
 // RUN: ttmlir-opt --ttir-load-system-desc --ttir-layout --ttnn-open-device --convert-ttir-to-ttnn %s | FileCheck %s
 #any_device_tile = #tt.operand_constraint<dram|l1|tile|any_device_tile>
 module attributes {} {
-  func.func @forward(%arg0: tensor<8x8xbf16>) -> tensor<8x8xbf16> {
-    %0 = tensor.empty() : tensor<8x8xbf16>
+  func.func @forward(%arg0: tensor<32x32xbf16>) -> tensor<32x32xbf16> {
+    %0 = tensor.empty() : tensor<32x32xbf16>
     // CHECK: %[[C:.*]] = "ttnn.transpose"[[C:.*]]
-    %1 = "ttir.transpose"(%arg0, %0) <{dimension1 = -1 : si32, dimension2 = -2 : si32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<8x8xbf16>, tensor<8x8xbf16>) -> tensor<8x8xbf16>
-    return %1 : tensor<8x8xbf16>
+    %1 = "ttir.transpose"(%arg0, %0) <{dim0 = -1 : si32, dim1 = -2 : si32, operand_constraints = [#any_device_tile, #any_device_tile]}> : (tensor<32x32xbf16>, tensor<32x32xbf16>) -> tensor<32x32xbf16>
+    return %1 : tensor<32x32xbf16>
   }
 }
