Skip to content

Commit

Permalink
Add support for logical xor op.
Browse files Browse the repository at this point in the history
* Add end-to-end implementation of the logical xor op.
* Add stablehlo to ttir conversion
  • Loading branch information
mmanzoorTT committed Nov 4, 2024
1 parent 6100428 commit 04e0663
Show file tree
Hide file tree
Showing 13 changed files with 123 additions and 25 deletions.
7 changes: 7 additions & 0 deletions include/ttmlir/Dialect/TTIR/IR/TTIROps.td
Original file line number Diff line number Diff line change
Expand Up @@ -388,6 +388,13 @@ def TTIR_LogicalOrOp : TTIR_ElementwiseBinaryOp<"logical_or"> {
}];
}

def TTIR_LogicalXorOp : TTIR_ElementwiseBinaryOp<"logical_xor"> {
let summary = "Eltwise logical xor.";
let description = [{
Eltwise logical xor operation. Produces, for each element position, a
true value iff exactly one of the two corresponding input elements is
true; mirrors TTNN_LogicalXorOp on the TTIR side of the lowering.
}];
}

def TTIR_MaximumOp : TTIR_ElementwiseBinaryOp<"maximum"> {
let summary = "Eltwise maximum OP.";
let description = [{
Expand Down
7 changes: 7 additions & 0 deletions include/ttmlir/Dialect/TTNN/IR/TTNNOps.td
Original file line number Diff line number Diff line change
Expand Up @@ -349,6 +349,13 @@ def TTNN_LogicalOrOp : TTNN_ElementwiseBinaryOp<"logical_or"> {
}];
}

def TTNN_LogicalXorOp : TTNN_ElementwiseBinaryOp<"logical_xor"> {
let summary = "Eltwise logical xor.";
let description = [{
Eltwise logical xor operation. Produces, for each element position, a
true value iff exactly one of the two corresponding input elements is
true; lowered from TTIR_LogicalXorOp and serialized to the flatbuffer
EltwiseOpType::LogicalXor variant.
}];
}

def TTNN_MaximumOp : TTNN_ElementwiseBinaryOp<"maximum"> {
let summary = "Eltwise maximum OP.";
let description = [{
Expand Down
3 changes: 2 additions & 1 deletion include/ttmlir/Target/TTNN/program.fbs
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,8 @@ enum EltwiseOpType: uint32 {
Log = 28,
Log1p = 29,
Expm1 = 30,
Sign = 31
Sign = 31,
LogicalXor = 32,
}

union EltwiseOpParams {
Expand Down
3 changes: 3 additions & 0 deletions lib/Conversion/StableHLOToTTIR/StableHLOToTTIRPatterns.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1022,6 +1022,9 @@ void addLogicalOpConversionPattern(MLIRContext *ctx,
ctx);
patterns.add<StableHLOToTTIROpLogicalOpConversionPattern<
mlir::stablehlo::OrOp, mlir::tt::ttir::LogicalOrOp>>(typeConverter, ctx);
patterns.add<StableHLOToTTIROpLogicalOpConversionPattern<
mlir::stablehlo::XorOp, mlir::tt::ttir::LogicalXorOp>>(typeConverter,
ctx);
}

void addSliceOpConversionPattern(MLIRContext *ctx, RewritePatternSet &patterns,
Expand Down
1 change: 1 addition & 0 deletions lib/Conversion/TTIRToTTNN/TTIRToTTNN.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -863,6 +863,7 @@ void populateTTIRToTTNNPatterns(MLIRContext *ctx, RewritePatternSet &patterns,
ElementwiseOpConversionPattern<ttir::LogicalAndOp, ttnn::LogicalAndOp>,
ElementwiseOpConversionPattern<ttir::LogicalOrOp, ttnn::LogicalOrOp>,
ElementwiseOpConversionPattern<ttir::LogicalNotOp, ttnn::LogicalNotOp>,
ElementwiseOpConversionPattern<ttir::LogicalXorOp, ttnn::LogicalXorOp>,
ElementwiseOpConversionPattern<ttir::MultiplyOp, ttnn::MultiplyOp>,
ElementwiseOpConversionPattern<ttir::EqualOp, ttnn::EqualOp>,
ElementwiseOpConversionPattern<ttir::NotEqualOp, ttnn::NotEqualOp>,
Expand Down
1 change: 1 addition & 0 deletions lib/Conversion/TTNNToEmitC/TTNNToEmitC.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -637,6 +637,7 @@ void populateTTNNToEmitCPatterns(mlir::MLIRContext *ctx,
patterns.add<DefaultOpConversionPattern<ttnn::AddOp>,
DefaultOpConversionPattern<ttnn::LogicalAndOp>,
DefaultOpConversionPattern<ttnn::LogicalOrOp>,
DefaultOpConversionPattern<ttnn::LogicalXorOp>,
DefaultOpConversionPattern<ttnn::SubtractOp>,
MultiplyOpConversionPattern,
DefaultOpConversionPattern<ttnn::EqualOp>,
Expand Down
5 changes: 5 additions & 0 deletions lib/Target/TTNN/TTNNToFlatbuffer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -309,6 +309,8 @@ createEltwiseOp(FlatbufferObjectCache &cache, EltwiseOp op) {
type = ::tt::target::ttnn::EltwiseOpType::LogicalNot;
} else if constexpr (std::is_same_v<EltwiseOp, LogicalOrOp>) {
type = ::tt::target::ttnn::EltwiseOpType::LogicalOr;
} else if constexpr (std::is_same_v<EltwiseOp, LogicalXorOp>) {
type = ::tt::target::ttnn::EltwiseOpType::LogicalXor;
} else if constexpr (std::is_same_v<EltwiseOp, MultiplyOp>) {
type = ::tt::target::ttnn::EltwiseOpType::Multiply;
} else if constexpr (std::is_same_v<EltwiseOp, NegOp>) {
Expand Down Expand Up @@ -556,6 +558,9 @@ emitTTNNOperation(FlatbufferObjectCache &cache, Operation *op,
if (auto orOp = dyn_cast<LogicalOrOp>(op); orOp) {
return createOperation(cache, createEltwiseOp(cache, orOp), debugString);
}
if (auto xorOp = dyn_cast<LogicalXorOp>(op); xorOp) {
return createOperation(cache, createEltwiseOp(cache, xorOp), debugString);
}
if (auto multiplyOp = dyn_cast<MultiplyOp>(op); multiplyOp) {
return createOperation(cache, createEltwiseOp(cache, multiplyOp),
debugString);
Expand Down
4 changes: 4 additions & 0 deletions runtime/lib/ttnn/operations/eltwise/binary/binary.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,10 @@ void run(const ::tt::target::ttnn::EltwiseOp *op, ProgramContext &context) {
runEltwiseBinaryOP(op, tensorPool, ::ttnn::logical_or);
break;
}
case ::tt::target::ttnn::EltwiseOpType::LogicalXor: {
runEltwiseBinaryOP(op, tensorPool, ::ttnn::logical_xor);
break;
}
case ::tt::target::ttnn::EltwiseOpType::Multiply: {
runEltwiseBinaryOP(op, tensorPool, ::ttnn::multiply);
break;
Expand Down
39 changes: 15 additions & 24 deletions test/ttmlir/Conversion/StableHLOToTTIR/binary/logical_op.mlir
Original file line number Diff line number Diff line change
@@ -1,39 +1,30 @@
// REQUIRES: stablehlo
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s
module @jit_eltwise_compare attributes {} {
module @jit_eltwise_logical attributes {} {
func.func public @logical_and(%arg0: tensor<13x31xi1>, %arg1: tensor<13x31xi1>) -> tensor<13x31xi1> {
%0 = stablehlo.and %arg0, %arg1 : tensor<13x31xi1>
// CHECK: %[[E:.*]] = tensor.empty() : tensor<13x31xbf16>
// CHECK: %[[E:.*]] = tensor.empty() : [[TENSOR:tensor<13x31xbf16>]]
// CHECK: = "ttir.logical_and"(%arg0, %arg1, %[[E]])
// CHECK-SAME: (tensor<13x31xbf16>, tensor<13x31xbf16>, tensor<13x31xbf16>) -> tensor<13x31xbf16>
// CHECK-SAME: ([[TENSOR]], [[TENSOR]], [[TENSOR]]) -> [[TENSOR]]
%0 = stablehlo.and %arg0, %arg1 : tensor<13x31xi1>
// CHECK: return %1 : [[TENSOR]]
return %0 : tensor<13x31xi1>
// CHECK: return %1 : tensor<13x31xbf16>
}

func.func public @logical_or(%arg0: tensor<13x31xi1>, %arg1: tensor<13x31xi1>) -> tensor<13x31xi1> {
%0 = stablehlo.or %arg0, %arg1 : tensor<13x31xi1>
// CHECK: %[[E:.*]] = tensor.empty() : tensor<13x31xbf16>
// CHECK: %[[E:.*]] = tensor.empty() : [[TENSOR:tensor<13x31xbf16>]]
// CHECK: = "ttir.logical_or"(%arg0, %arg1, %[[E]])
// CHECK-SAME: (tensor<13x31xbf16>, tensor<13x31xbf16>, tensor<13x31xbf16>) -> tensor<13x31xbf16>
// CHECK-SAME: ([[TENSOR]], [[TENSOR]], [[TENSOR]]) -> [[TENSOR]]
%0 = stablehlo.or %arg0, %arg1 : tensor<13x31xi1>
// CHECK: return %1 : [[TENSOR]]
return %0 : tensor<13x31xi1>
// CHECK: return %1 : tensor<13x31xbf16>
}

func.func public @logical_not(%arg0: tensor<13x31xi1>) -> tensor<13x31xi1> {
%0 = stablehlo.not %arg0 : tensor<13x31xi1>
// CHECK: %[[E:.*]] = tensor.empty() : tensor<13x31xbf16>
// CHECK: = "ttir.logical_not"(%arg0, %[[E]])
// CHECK-SAME: (tensor<13x31xbf16>, tensor<13x31xbf16>) -> tensor<13x31xbf16>
func.func public @logical_xor(%arg0: tensor<13x31xi1>, %arg1: tensor<13x31xi1>) -> tensor<13x31xi1> {
// CHECK: %[[E:.*]] = tensor.empty() : [[TENSOR:tensor<13x31xbf16>]]
// CHECK: = "ttir.logical_xor"(%arg0, %arg1, %[[E]])
// CHECK-SAME: ([[TENSOR]], [[TENSOR]], [[TENSOR]]) -> [[TENSOR]]
%0 = stablehlo.xor %arg0, %arg1 : tensor<13x31xi1>
// CHECK: return %1 : [[TENSOR]]
return %0 : tensor<13x31xi1>
// CHECK: return %1 : tensor<13x31xbf16>
}

func.func public @logical_not_scalar(%arg0: tensor<i1>) -> tensor<i1> {
%0 = stablehlo.not %arg0 : tensor<i1>
// CHECK: %[[E:.*]] = tensor.empty() : tensor<1xbf16>
// CHECK: = "ttir.logical_not"(%arg0, %[[E]])
// CHECK-SAME: (tensor<1xbf16>, tensor<1xbf16>) -> tensor<1xbf16>
return %0 : tensor<i1>
// CHECK: return %1 : tensor<1xbf16>
}
}
22 changes: 22 additions & 0 deletions test/ttmlir/Conversion/StableHLOToTTIR/unary/logical_op.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
// REQUIRES: stablehlo
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s
// Unary logical ops (stablehlo.not) lowered to ttir.logical_not; boolean
// (i1) tensors are converted to bf16, and a scalar tensor<i1> is promoted
// to rank-1 tensor<1xbf16>.
module @jit_eltwise_logical attributes {} {
  func.func public @logical_not(%arg0: tensor<13x31xi1>) -> tensor<13x31xi1> {
    // Capture the converted element type once and reuse it below, as the
    // sibling binary logical_op.mlir test does.
    // CHECK: %[[E:.*]] = tensor.empty() : [[TENSOR:tensor<13x31xbf16>]]
    // CHECK: = "ttir.logical_not"(%arg0, %[[E]])
    // CHECK-SAME: ([[TENSOR]], [[TENSOR]]) -> [[TENSOR]]
    %0 = stablehlo.not %arg0 : tensor<13x31xi1>
    // CHECK: return %1 : [[TENSOR]]
    return %0 : tensor<13x31xi1>
  }

  func.func public @logical_not_scalar(%arg0: tensor<i1>) -> tensor<i1> {
    // [[TENSOR]] must be (re)defined here: the original test referenced it
    // without a [[TENSOR:...]] capture anywhere in the file, which makes
    // FileCheck fail with an undefined-variable error.
    // CHECK: %[[E:.*]] = tensor.empty() : [[TENSOR:tensor<1xbf16>]]
    // CHECK: = "ttir.logical_not"(%arg0, %[[E]])
    // CHECK-SAME: ([[TENSOR]], [[TENSOR]]) -> [[TENSOR]]
    %0 = stablehlo.not %arg0 : tensor<i1>
    // CHECK: return %1 : [[TENSOR]]
    return %0 : tensor<i1>
  }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s

#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
module attributes {} {
func.func @logical_xor(%arg0: tensor<64x128xbf16>, %arg1: tensor<64x128xbf16>) -> tensor<64x128xbf16> {
// CHECK: %{{[0-9]+}} = "ttnn.empty"{{.*}} [[TENSOR:tensor<64x128xbf16]]
%0 = tensor.empty() : tensor<64x128xbf16>
// CHECK: %{{[0-9]+}} = "ttnn.logical_xor"
// CHECK-SAME: [[TENSOR]]
// CHECK-SAME: [[TENSOR]]
// CHECK-SAME: [[TENSOR]]
// CHECK-SAME: -> [[TENSOR]]
%1 = "ttir.logical_xor"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xbf16>, tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
return %1 : tensor<64x128xbf16>
}
}
19 changes: 19 additions & 0 deletions test/ttmlir/Silicon/TTNN/perf_unit/test_perf_xor.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
// RUN: FileCheck %s --input-file=%t.mlir
// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
// REQUIRES: https://github.com/tenstorrent/tt-mlir/issues/1149

#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
#any_device_tile = #tt.operand_constraint<dram|l1|tile|any_device_tile>

// Silicon perf-unit variant of the logical_xor lowering test: same lowering
// as the pipeline test, but also translated to a flatbuffer (%t.ttnn) so it
// can run on hardware once the gating issue (#1149) is resolved.
func.func @logical_xor(%arg0: tensor<64x128xbf16>, %arg1: tensor<64x128xbf16>) -> tensor<64x128xbf16> {
// CHECK: %{{[0-9]+}} = "ttnn.empty"{{.*}} [[TENSOR:tensor<64x128xbf16]]
%0 = tensor.empty() : tensor<64x128xbf16>
// CHECK: %{{[0-9]+}} = "ttnn.logical_xor"
// CHECK-SAME: [[TENSOR]]
// CHECK-SAME: [[TENSOR]]
// CHECK-SAME: [[TENSOR]]
// CHECK-SAME: -> [[TENSOR]]
%1 = "ttir.logical_xor"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xbf16>, tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
return %1 : tensor<64x128xbf16>
}
21 changes: 21 additions & 0 deletions test/ttmlir/Silicon/TTNN/simple_logical_xor.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
// RUN: FileCheck %s --input-file=%t.mlir
// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
// REQUIRES: https://github.com/tenstorrent/tt-mlir/issues/1149

#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
#any_device_tile = #tt.operand_constraint<dram|l1|tile|any_device_tile>

// Silicon end-to-end test: lowers ttir.logical_xor through the TTNN backend
// pipeline and serializes to a flatbuffer; execution is gated on issue #1149.
module attributes {} {
func.func @logical_xor(%arg0: tensor<64x128xbf16>, %arg1: tensor<64x128xbf16>) -> tensor<64x128xbf16> {
// CHECK: %{{[0-9]+}} = "ttnn.empty"{{.*}} [[TENSOR:tensor<64x128xbf16]]
%0 = tensor.empty() : tensor<64x128xbf16>
// CHECK: %{{[0-9]+}} = "ttnn.logical_xor"
// CHECK-SAME: [[TENSOR]]
// CHECK-SAME: [[TENSOR]]
// CHECK-SAME: [[TENSOR]]
// CHECK-SAME: -> [[TENSOR]]
%1 = "ttir.logical_xor"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>, operand_constraints = [#any_device, #any_device, #any_device]}> : (tensor<64x128xbf16>, tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
return %1 : tensor<64x128xbf16>
}
}

0 comments on commit 04e0663

Please sign in to comment.