diff --git a/include/ttmlir/Dialect/TTIR/IR/TTIROps.td b/include/ttmlir/Dialect/TTIR/IR/TTIROps.td
index 76a972df1..c0e5a78b8 100644
--- a/include/ttmlir/Dialect/TTIR/IR/TTIROps.td
+++ b/include/ttmlir/Dialect/TTIR/IR/TTIROps.td
@@ -278,6 +278,19 @@ def TTIR_LogicalNotOp: TTIR_ElementwiseUnaryOp<"logical_not"> {
   }];
 }
 
+def TTIR_BitwiseNotOp : TTIR_ElementwiseUnaryOp<"bitwise_not"> {
+  let summary = "Eltwise bitwise NOT.";
+  let description = [{
+    Performs element-wise bitwise NOT of tensor `operand` and produces a `result` tensor.
+
+    Example:
+    // Bitwise operation with integer tensors
+    // %operand: [[1, 2], [3, 4]]
+    %result = "ttir.bitwise_not"(%operand) : (tensor<2x2xi32>) -> tensor<2x2xi32>
+    // %result: [[-2, -3], [-4, -5]]
+  }];
+}
+
 def TTIR_NegOp: TTIR_ElementwiseUnaryOp<"neg"> {
   let summary = "Eltwise negate op.";
   let description = [{
@@ -514,6 +527,48 @@ def TTIR_LogicalXorOp : TTIR_ElementwiseBinaryOp<"logical_xor"> {
   }];
 }
 
+def TTIR_BitwiseAndOp : TTIR_ElementwiseBinaryOp<"bitwise_and"> {
+  let summary = "Eltwise bitwise AND.";
+  let description = [{
+    Performs element-wise bitwise AND of two tensors `lhs` and `rhs`
+    and produces a `result` tensor.
+
+    Example:
+    // %lhs: [[1, 2], [3, 4]]
+    // %rhs: [[5, 6], [7, 8]]
+    %result = "ttir.bitwise_and"(%lhs, %rhs) : (tensor<2x2xi32>, tensor<2x2xi32>) -> tensor<2x2xi32>
+    // %result: [[1, 2], [3, 0]]
+  }];
+}
+
+def TTIR_BitwiseOrOp : TTIR_ElementwiseBinaryOp<"bitwise_or"> {
+  let summary = "Eltwise bitwise OR.";
+  let description = [{
+    Performs element-wise bitwise OR of two tensors `lhs` and `rhs`
+    and produces a `result` tensor.
+
+    Example:
+    // %lhs: [[1, 2], [3, 4]]
+    // %rhs: [[5, 6], [7, 8]]
+    %result = "ttir.bitwise_or"(%lhs, %rhs) : (tensor<2x2xi32>, tensor<2x2xi32>) -> tensor<2x2xi32>
+    // %result: [[5, 6], [7, 12]]
+  }];
+}
+
+def TTIR_BitwiseXorOp : TTIR_ElementwiseBinaryOp<"bitwise_xor"> {
+  let summary = "Eltwise bitwise XOR.";
+  let description = [{
+    Performs element-wise bitwise XOR of two tensors `lhs` and `rhs`
+    and produces a `result` tensor.
+
+    Example:
+    // %lhs: [[1, 2], [3, 4]]
+    // %rhs: [[5, 6], [7, 8]]
+    %result = "ttir.bitwise_xor"(%lhs, %rhs) : (tensor<2x2xi32>, tensor<2x2xi32>) -> tensor<2x2xi32>
+    // %result: [[4, 4], [4, 12]]
+  }];
+}
+
 def TTIR_MinimumOp : TTIR_ElementwiseBinaryOp<"minimum"> {
   let summary = "Eltwise minimum OP.";
   let description = [{
diff --git a/include/ttmlir/Dialect/TTNN/IR/TTNNOps.td b/include/ttmlir/Dialect/TTNN/IR/TTNNOps.td
index 4e434db56..7567364d1 100644
--- a/include/ttmlir/Dialect/TTNN/IR/TTNNOps.td
+++ b/include/ttmlir/Dialect/TTNN/IR/TTNNOps.td
@@ -246,6 +246,19 @@ def TTNN_LogicalNotOp: TTNN_ElementwiseUnaryOp<"logical_not"> {
   }];
 }
 
+def TTNN_BitwiseNotOp : TTNN_ElementwiseUnaryOp<"bitwise_not"> {
+  let summary = "Eltwise bitwise NOT.";
+  let description = [{
+    Performs element-wise bitwise NOT of tensor `operand` and produces a `result` tensor.
+
+    Example:
+    // Bitwise operation with integer tensors
+    // %operand: [[1, 2], [3, 4]]
+    %result = "ttnn.bitwise_not"(%operand) : (tensor<2x2xi32>) -> tensor<2x2xi32>
+    // %result: [[-2, -3], [-4, -5]]
+  }];
+}
+
 def TTNN_NegOp : TTNN_ElementwiseUnaryOp<"neg"> {
   let summary = "Eltwise negate.";
   let description = [{
@@ -461,6 +474,48 @@ def TTNN_LogicalXorOp : TTNN_ElementwiseBinaryOp<"logical_xor"> {
   }];
 }
 
+def TTNN_BitwiseAndOp : TTNN_ElementwiseBinaryOp<"bitwise_and"> {
+  let summary = "Eltwise bitwise AND.";
+  let description = [{
+    Performs element-wise bitwise AND of two tensors `lhs` and `rhs`
+    and produces a `result` tensor.
+
+    Example:
+    // %lhs: [[1, 2], [3, 4]]
+    // %rhs: [[5, 6], [7, 8]]
+    %result = "ttnn.bitwise_and"(%lhs, %rhs) : (tensor<2x2xi32>, tensor<2x2xi32>) -> tensor<2x2xi32>
+    // %result: [[1, 2], [3, 0]]
+  }];
+}
+
+def TTNN_BitwiseOrOp : TTNN_ElementwiseBinaryOp<"bitwise_or"> {
+  let summary = "Eltwise bitwise OR.";
+  let description = [{
+    Performs element-wise bitwise OR of two tensors `lhs` and `rhs`
+    and produces a `result` tensor.
+
+    Example:
+    // %lhs: [[1, 2], [3, 4]]
+    // %rhs: [[5, 6], [7, 8]]
+    %result = "ttnn.bitwise_or"(%lhs, %rhs) : (tensor<2x2xi32>, tensor<2x2xi32>) -> tensor<2x2xi32>
+    // %result: [[5, 6], [7, 12]]
+  }];
+}
+
+def TTNN_BitwiseXorOp : TTNN_ElementwiseBinaryOp<"bitwise_xor"> {
+  let summary = "Eltwise bitwise XOR.";
+  let description = [{
+    Performs element-wise bitwise XOR of two tensors `lhs` and `rhs`
+    and produces a `result` tensor.
+
+    Example:
+    // %lhs: [[1, 2], [3, 4]]
+    // %rhs: [[5, 6], [7, 8]]
+    %result = "ttnn.bitwise_xor"(%lhs, %rhs) : (tensor<2x2xi32>, tensor<2x2xi32>) -> tensor<2x2xi32>
+    // %result: [[4, 4], [4, 12]]
+  }];
+}
+
 def TTNN_MaximumOp : TTNN_ElementwiseBinaryOp<"maximum"> {
   let summary = "Eltwise maximum OP.";
   let description = [{
diff --git a/include/ttmlir/Target/TTNN/program.fbs b/include/ttmlir/Target/TTNN/program.fbs
index 3c70bfedd..4ba5443ad 100644
--- a/include/ttmlir/Target/TTNN/program.fbs
+++ b/include/ttmlir/Target/TTNN/program.fbs
@@ -116,7 +116,12 @@ enum EltwiseOpType: uint32 {
   GreaterThan,
   LogicalAnd,
   LogicalOr,
+  LogicalXor,
   LogicalNot,
+  BitwiseAnd,
+  BitwiseOr,
+  BitwiseXor,
+  BitwiseNot,
   Cbrt,
   Minimum,
   Ceil,
@@ -131,7 +136,6 @@ enum EltwiseOpType: uint32 {
   Floor,
   Where,
   Gelu,
-  LogicalXor,
   Clamp,
   LeakyRelu,
   Scatter,
diff --git a/lib/Conversion/StableHLOToTTIR/StableHLOToTTIRPatterns.cpp b/lib/Conversion/StableHLOToTTIR/StableHLOToTTIRPatterns.cpp index 61a3154a1..b541d0a3e 100644 --- a/lib/Conversion/StableHLOToTTIR/StableHLOToTTIRPatterns.cpp +++ b/lib/Conversion/StableHLOToTTIR/StableHLOToTTIRPatterns.cpp @@ -966,9 +966,16 @@ class StableHLOToTTIRConcatOpConversionPattern } }; -template -class StableHLOToTTIROpLogicalOpConversionPattern +class StableHLOToTTIRLogicalAndBitwiseOpConversionPattern : public OpConversionPattern { using OpConversionPattern::OpConversionPattern; @@ -977,37 +984,49 @@ class StableHLOToTTIROpLogicalOpConversionPattern LogicalResult matchAndRewrite(SrcOp srcOp, Adaptor adaptor, ConversionPatternRewriter &rewriter) const override { - LogicalResult legalityResult = checkBasicLegality(srcOp, adaptor, rewriter); - if (!legalityResult.succeeded()) { - return legalityResult; - } - auto outputType = mlir::cast( this->getTypeConverter()->convertType(srcOp.getResult().getType())); + tensor::EmptyOp outputTensor = rewriter.create( srcOp.getLoc(), outputType.getShape(), outputType.getElementType()); - rewriter.replaceOpWithNewOp( - srcOp, - TypeRange( -
this->getTypeConverter()->convertType(outputTensor.getType())), - adaptor.getOperands(), ValueRange(outputTensor)); + + if (getStableHLOOpType(srcOp) == StableHLOOpType::kLogical) { + replaceOpWithNewOp(srcOp, adaptor, outputTensor, rewriter); + } else { + replaceOpWithNewOp(srcOp, adaptor, outputTensor, rewriter); + } + return success(); } private: - LogicalResult checkBasicLegality(SrcOp srcOp, Adaptor adaptor, - ConversionPatternRewriter &rewriter) const { - if (mlir::cast(srcOp->getOperand(0).getType()) - .getElementTypeBitWidth() > 1 && - mlir::cast(srcOp->getOperand(1).getType()) - .getElementTypeBitWidth() > 1) { - llvm::errs() - << "error: TTIR does not support bitwise logical operation.\n"; - return rewriter.notifyMatchFailure( - srcOp, "TTIR does not support bitwise logical operation."); - } + enum StableHLOOpType { kLogical = 0, kBitwise = 1 }; + + // Determines stablehlo op type based on its operand types (i.e. their + // bit width). This assumes boolean operands are modeled as 1bit wide ints. + static StableHLOOpType getStableHLOOpType(const SrcOp &srcOp) { + // Checks if all operands are boolean (have bit width equal to 1). + bool allOperandsAreBoolean = std::all_of( + srcOp->operand_begin(), srcOp->operand_end(), [](auto operand) { + return mlir::cast(operand.getType()) + .getElementTypeBitWidth() == 1; + }); + + return allOperandsAreBoolean ? StableHLOOpType::kLogical + : StableHLOOpType::kBitwise; + } - return success(); + // Helper function to replace the operation with the new op to avoid code + // duplication. + template + void replaceOpWithNewOp(SrcOp srcOp, Adaptor adaptor, + tensor::EmptyOp outputTensor, + ConversionPatternRewriter &rewriter) const { + rewriter.replaceOpWithNewOp( + srcOp, + TypeRange( + this->getTypeConverter()->convertType(outputTensor.getType())), + adaptor.getOperands(), ValueRange(outputTensor)); } }; @@ -1885,20 +1904,21 @@ void addCCLOpsConversionPattern(MLIRContext *ctx, RewritePatternSet &patterns, ctx); } -void addLogicalOpConversionPattern(MLIRContext *ctx, - RewritePatternSet &patterns, - TypeConverter &typeConverter) { - patterns.add>(typeConverter, - ctx); - patterns.add>(typeConverter, - ctx); - patterns.add>(typeConverter, ctx); - patterns.add>(typeConverter, - ctx); +void addLogicalAndBitwiseOpsConversionPatterns(MLIRContext *ctx, + RewritePatternSet &patterns, + TypeConverter &typeConverter) { + patterns.add>(typeConverter, ctx); + patterns.add>(typeConverter, ctx); + patterns.add>(typeConverter, ctx); + patterns.add>(typeConverter, ctx); } void addSliceOpConversionPattern(MLIRContext *ctx, RewritePatternSet &patterns, @@ -1963,8 +1983,8 @@ void populateStableHLOToTTIRPatterns(MLIRContext *ctx, addCompareOpsConversionPatterns(ctx, patterns, typeConverter); addConcatOpsConversionPatterns(ctx, patterns, typeConverter); addReshapeOpConversionPattern(ctx, patterns, typeConverter); - addLogicalOpConversionPattern(ctx, patterns, typeConverter); addCCLOpsConversionPattern(ctx, patterns, typeConverter); + addLogicalAndBitwiseOpsConversionPatterns(ctx, patterns, typeConverter); addSliceOpConversionPattern(ctx, patterns, typeConverter); addClampOpConversionPattern(ctx, patterns, typeConverter); addGatherOpConversionPattern(ctx, patterns, typeConverter); diff --git a/lib/Conversion/TTIRToTTNN/TTIRToTTNN.cpp b/lib/Conversion/TTIRToTTNN/TTIRToTTNN.cpp index 945492481..a2b63a1bc 100644 --- a/lib/Conversion/TTIRToTTNN/TTIRToTTNN.cpp +++ b/lib/Conversion/TTIRToTTNN/TTIRToTTNN.cpp @@ -1141,6 +1141,10 @@ void populateTTIRToTTNNPatterns(MLIRContext 
*ctx, RewritePatternSet &patterns, ElementwiseOpConversionPattern, ElementwiseOpConversionPattern, ElementwiseOpConversionPattern, + ElementwiseOpConversionPattern, + ElementwiseOpConversionPattern, + ElementwiseOpConversionPattern, + ElementwiseOpConversionPattern, ElementwiseOpConversionPattern, ElementwiseOpConversionPattern, ElementwiseOpConversionPattern, @@ -1173,7 +1177,7 @@ void populateTTIRToTTNNPatterns(MLIRContext *ctx, RewritePatternSet &patterns, ReductionOpConversionPattern, ReductionOpConversionPattern, ReductionOpConversionPattern, - ElementwiseUnaryWithFloatParameterOpConversionPattern, + ElementwiseUnaryWithFloatParameterOpConversionPattern, EmbeddingOpConversionPattern, EmbeddingBackwardOpConversionPattern, SoftmaxOpConversionPattern, diff --git a/lib/Conversion/TTNNToEmitC/TTNNToEmitC.cpp b/lib/Conversion/TTNNToEmitC/TTNNToEmitC.cpp index c1a07b5fc..624ceddc3 100644 --- a/lib/Conversion/TTNNToEmitC/TTNNToEmitC.cpp +++ b/lib/Conversion/TTNNToEmitC/TTNNToEmitC.cpp @@ -681,6 +681,7 @@ void populateTTNNToEmitCPatterns(mlir::MLIRContext *ctx, DefaultOpConversionPattern, DefaultOpConversionPattern, DefaultOpConversionPattern, + DefaultOpConversionPattern, DefaultOpConversionPattern, DefaultOpConversionPattern, DefaultOpConversionPattern, @@ -703,11 +704,14 @@ void populateTTNNToEmitCPatterns(mlir::MLIRContext *ctx, // Eltwise binary ops // patterns.add, + EltwiseBinaryOpConversionPattern, + EltwiseBinaryOpConversionPattern, EltwiseBinaryOpConversionPattern, EltwiseBinaryOpConversionPattern, EltwiseBinaryOpConversionPattern, - EltwiseBinaryOpConversionPattern, - EltwiseBinaryOpConversionPattern, + DefaultOpConversionPattern, + DefaultOpConversionPattern, + DefaultOpConversionPattern, DefaultOpConversionPattern, DefaultOpConversionPattern, DefaultOpConversionPattern, diff --git a/lib/Target/TTNN/TTNNToFlatbuffer.cpp b/lib/Target/TTNN/TTNNToFlatbuffer.cpp index 5fd09d5e4..b7ff1d7d5 100644 --- a/lib/Target/TTNN/TTNNToFlatbuffer.cpp +++ b/lib/Target/TTNN/TTNNToFlatbuffer.cpp @@ -610,6 +610,14 @@ createEltwiseOp(FlatbufferObjectCache &cache, EltwiseOp op) { type = ::tt::target::ttnn::EltwiseOpType::LogicalOr; } else if constexpr (std::is_same_v) { type = ::tt::target::ttnn::EltwiseOpType::LogicalXor; + } else if constexpr (std::is_same_v) { + type = ::tt::target::ttnn::EltwiseOpType::BitwiseAnd; + } else if constexpr (std::is_same_v) { + type = ::tt::target::ttnn::EltwiseOpType::BitwiseOr; + } else if constexpr (std::is_same_v) { + type = ::tt::target::ttnn::EltwiseOpType::BitwiseXor; + } else if constexpr (std::is_same_v) { + type = ::tt::target::ttnn::EltwiseOpType::BitwiseNot; } else if constexpr (std::is_same_v) { type = ::tt::target::ttnn::EltwiseOpType::Multiply; } else if constexpr (std::is_same_v) { @@ -912,16 +920,12 @@ emitTTNNOperation(FlatbufferObjectCache &cache, Operation *op, return createOperation(cache, createEltwiseOp(cache, isFiniteOp), debugString, locInfo); } - if (auto andOp = dyn_cast(op); andOp) { - return createOperation(cache, createEltwiseOp(cache, andOp), debugString, - locInfo); - } if (auto cbrtOp = dyn_cast(op); cbrtOp) { return createOperation(cache, createEltwiseOp(cache, cbrtOp), debugString, locInfo); } - if (auto notOp = dyn_cast(op); notOp) { - return createOperation(cache, createEltwiseOp(cache, notOp), debugString, + if (auto andOp = dyn_cast(op); andOp) { + return createOperation(cache, createEltwiseOp(cache, andOp), debugString, locInfo); } if (auto orOp = dyn_cast(op); orOp) { @@ -932,6 +936,26 @@ emitTTNNOperation(FlatbufferObjectCache 
&cache, Operation *op, return createOperation(cache, createEltwiseOp(cache, xorOp), debugString, locInfo); } + if (auto notOp = dyn_cast(op); notOp) { + return createOperation(cache, createEltwiseOp(cache, notOp), debugString, + locInfo); + } + if (auto bitwiseAndOp = dyn_cast(op); bitwiseAndOp) { + return createOperation(cache, createEltwiseOp(cache, bitwiseAndOp), + debugString, locInfo); + } + if (auto bitwiseOrOp = dyn_cast(op); bitwiseOrOp) { + return createOperation(cache, createEltwiseOp(cache, bitwiseOrOp), + debugString, locInfo); + } + if (auto bitwiseXorOp = dyn_cast(op); bitwiseXorOp) { + return createOperation(cache, createEltwiseOp(cache, bitwiseXorOp), + debugString, locInfo); + } + if (auto bitwiseNotOp = dyn_cast(op); bitwiseNotOp) { + return createOperation(cache, createEltwiseOp(cache, bitwiseNotOp), + debugString, locInfo); + } if (auto multiplyOp = dyn_cast(op); multiplyOp) { return createOperation(cache, createEltwiseOp(cache, multiplyOp), debugString, locInfo); diff --git a/runtime/lib/ttnn/operations/eltwise/binary/binary.cpp b/runtime/lib/ttnn/operations/eltwise/binary/binary.cpp index 40f80e259..aef7cb326 100644 --- a/runtime/lib/ttnn/operations/eltwise/binary/binary.cpp +++ b/runtime/lib/ttnn/operations/eltwise/binary/binary.cpp @@ -42,18 +42,6 @@ void run(const ::tt::target::ttnn::EltwiseOp *op, ProgramContext &context) { runEltwiseBinaryOp(op, tensorPool, ::ttnn::add); break; } - case ::tt::target::ttnn::EltwiseOpType::LogicalAnd: { - runEltwiseBinaryOp(op, tensorPool, ::ttnn::logical_and); - break; - } - case ::tt::target::ttnn::EltwiseOpType::LogicalOr: { - runEltwiseBinaryOp(op, tensorPool, ::ttnn::logical_or); - break; - } - case ::tt::target::ttnn::EltwiseOpType::LogicalXor: { - runEltwiseBinaryOp(op, tensorPool, ::ttnn::logical_xor); - break; - } case ::tt::target::ttnn::EltwiseOpType::Multiply: { runEltwiseBinaryOp(op, tensorPool, ::ttnn::multiply); break; @@ -90,6 +78,36 @@ void run(const ::tt::target::ttnn::EltwiseOp *op, ProgramContext &context) { runEltwiseBinaryOp(op, tensorPool, ::ttnn::divide); break; } + case ::tt::target::ttnn::EltwiseOpType::LogicalAnd: { + runEltwiseBinaryOp(op, tensorPool, ::ttnn::logical_and); + break; + } + case ::tt::target::ttnn::EltwiseOpType::LogicalOr: { + runEltwiseBinaryOp(op, tensorPool, ::ttnn::logical_or); + break; + } + case ::tt::target::ttnn::EltwiseOpType::LogicalXor: { + runEltwiseBinaryOp(op, tensorPool, ::ttnn::logical_xor); + break; + } + case ::tt::target::ttnn::EltwiseOpType::BitwiseAnd: { + LOG_ASSERT(false, "Binary bitwise_and op not supported in ttnn. See " + "https://github.com/tenstorrent/tt-metal/issues/13582"); + // runEltwiseBinaryOP(op, tensorPool, ::ttnn::bitwise_and); + break; + } + case ::tt::target::ttnn::EltwiseOpType::BitwiseOr: { + LOG_ASSERT(false, "Binary bitwise_or op not supported in ttnn. See " + "https://github.com/tenstorrent/tt-metal/issues/13582"); + // runEltwiseBinaryOP(op, tensorPool, ::ttnn::bitwise_or); + break; + } + case ::tt::target::ttnn::EltwiseOpType::BitwiseXor: { + LOG_ASSERT(false, "Binary bitwise_xor op not supported in ttnn. 
See " + "https://github.com/tenstorrent/tt-metal/issues/13582"); + // runEltwiseBinaryOP(op, tensorPool, ::ttnn::bitwise_xor); + break; + } default: LOG_FATAL("Unsupported Eltwise Binary operation"); } diff --git a/runtime/lib/ttnn/operations/eltwise/unary/unary.cpp b/runtime/lib/ttnn/operations/eltwise/unary/unary.cpp index 3ba13a4fd..50e62a9e6 100644 --- a/runtime/lib/ttnn/operations/eltwise/unary/unary.cpp +++ b/runtime/lib/ttnn/operations/eltwise/unary/unary.cpp @@ -151,6 +151,10 @@ void run(const ::tt::target::ttnn::EltwiseOp *op, ProgramContext &context) { runEltwiseUnaryWithFloatParameterOp(op, tensorPool, ::ttnn::leaky_relu); break; } + case ::tt::target::ttnn::EltwiseOpType::BitwiseNot: { + runEltwiseUnaryOp(op, tensorPool, ::ttnn::bitwise_not); + break; + } default: LOG_FATAL("Unsupported unary operation"); } diff --git a/test/ttmlir/Conversion/StableHLOToTTIR/binary/bitwise_op.mlir b/test/ttmlir/Conversion/StableHLOToTTIR/binary/bitwise_op.mlir new file mode 100644 index 000000000..3b1d6fcc1 --- /dev/null +++ b/test/ttmlir/Conversion/StableHLOToTTIR/binary/bitwise_op.mlir @@ -0,0 +1,36 @@ +// REQUIRES: stablehlo +// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s + +module @jit_eltwise_bitwise attributes {} { + func.func public @bitwise_and(%arg0: tensor<32x32xi32>, %arg1: tensor<32x32xi32>) -> tensor<32x32xi32> { + %0 = stablehlo.and %arg0, %arg1 : tensor<32x32xi32> + // CHECK: %[[EMPTY:[0-9]+]] = tensor.empty() : tensor<32x32xi32> + // CHECK: %[[AND:[0-9]+]] = "ttir.bitwise_and"(%arg0, %arg1, %[[EMPTY]]){{.*}} -> tensor<32x32xi32> + return %0 : tensor<32x32xi32> + // CHECK: return %[[AND]] : tensor<32x32xi32> + } + + func.func public @bitwise_or(%arg0: tensor<32x32xi32>, %arg1: tensor<32x32xi32>) -> tensor<32x32xi32> { + %0 = stablehlo.or %arg0, %arg1 : tensor<32x32xi32> + // CHECK: %[[EMPTY:[0-9]+]] = tensor.empty() : tensor<32x32xi32> + // CHECK: %[[OR:[0-9]+]] = "ttir.bitwise_or"(%arg0, %arg1, %[[EMPTY]]){{.*}} -> tensor<32x32xi32> + return %0 : tensor<32x32xi32> + // CHECK: return %[[OR]] : tensor<32x32xi32> + } + + func.func public @bitwise_xor(%arg0: tensor<32x32xi32>, %arg1: tensor<32x32xi32>) -> tensor<32x32xi32> { + %0 = stablehlo.xor %arg0, %arg1 : tensor<32x32xi32> + // CHECK: %[[EMPTY:[0-9]+]] = tensor.empty() : tensor<32x32xi32> + // CHECK: %[[XOR:[0-9]+]] = "ttir.bitwise_xor"(%arg0, %arg1, %[[EMPTY]]){{.*}} -> tensor<32x32xi32> + return %0 : tensor<32x32xi32> + // CHECK: return %[[XOR]] : tensor<32x32xi32> + } + + func.func public @bitwise_not(%arg0: tensor<32x32xi32>) -> tensor<32x32xi32> { + %0 = stablehlo.not %arg0 : tensor<32x32xi32> + // CHECK: %[[EMPTY:[0-9]+]] = tensor.empty() : tensor<32x32xi32> + // CHECK: %[[NOT:[0-9]+]] = "ttir.bitwise_not"(%arg0, %[[EMPTY]]){{.*}} -> tensor<32x32xi32> + return %0 : tensor<32x32xi32> + // CHECK: return %[[NOT]] : tensor<32x32xi32> + } +} diff --git a/test/ttmlir/Dialect/TTNN/eltwise/binary/bitwise_and/simple_bitwise_and.mlir b/test/ttmlir/Dialect/TTNN/eltwise/binary/bitwise_and/simple_bitwise_and.mlir new file mode 100644 index 000000000..4f1100ab3 --- /dev/null +++ b/test/ttmlir/Dialect/TTNN/eltwise/binary/bitwise_and/simple_bitwise_and.mlir @@ -0,0 +1,12 @@ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s + +module attributes {} { + func.func @bitwise_and(%arg0: tensor<64x128xi32>, %arg1: tensor<64x128xi32>) -> tensor<64x128xi32> { + %0 = tensor.empty() : tensor<64x128xi32> + // CHECK: %[[EMPTY:.*]] = "ttnn.empty"{{.*}} -> tensor<64x128xi32, {{.*}} + %1 = "ttir.bitwise_and"(%arg0, 
%arg1, %0) <{operandSegmentSizes = array}> : (tensor<64x128xi32>, tensor<64x128xi32>, tensor<64x128xi32>) -> tensor<64x128xi32> + // CHECK: {{.*}} "ttnn.bitwise_and"({{.*}}, {{.*}}, %[[EMPTY]]){{.*}} -> tensor<64x128xi32, {{.*}} + return %1 : tensor<64x128xi32> + // CHECK: return {{.*}} tensor<64x128xi32, {{.*}} + } +} diff --git a/test/ttmlir/Dialect/TTNN/eltwise/binary/bitwise_or/simple_bitwise_or.mlir b/test/ttmlir/Dialect/TTNN/eltwise/binary/bitwise_or/simple_bitwise_or.mlir new file mode 100644 index 000000000..eb0e9f7f1 --- /dev/null +++ b/test/ttmlir/Dialect/TTNN/eltwise/binary/bitwise_or/simple_bitwise_or.mlir @@ -0,0 +1,12 @@ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s + +module attributes {} { + func.func @bitwise_or(%arg0: tensor<64x128xi32>, %arg1: tensor<64x128xi32>) -> tensor<64x128xi32> { + %0 = tensor.empty() : tensor<64x128xi32> + // CHECK: %[[EMPTY:.*]] = "ttnn.empty"{{.*}} -> tensor<64x128xi32, {{.*}} + %1 = "ttir.bitwise_or"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<64x128xi32>, tensor<64x128xi32>, tensor<64x128xi32>) -> tensor<64x128xi32> + // CHECK: {{.*}} "ttnn.bitwise_or"({{.*}}, {{.*}}, %[[EMPTY]]){{.*}} -> tensor<64x128xi32, {{.*}} + return %1 : tensor<64x128xi32> + // CHECK: return {{.*}} tensor<64x128xi32, {{.*}} + } +} diff --git a/test/ttmlir/Dialect/TTNN/eltwise/binary/bitwise_xor/simple_bitwise_xor.mlir b/test/ttmlir/Dialect/TTNN/eltwise/binary/bitwise_xor/simple_bitwise_xor.mlir new file mode 100644 index 000000000..d05ad2ccf --- /dev/null +++ b/test/ttmlir/Dialect/TTNN/eltwise/binary/bitwise_xor/simple_bitwise_xor.mlir @@ -0,0 +1,12 @@ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s + +module attributes {} { + func.func @bitwise_xor(%arg0: tensor<64x128xi32>, %arg1: tensor<64x128xi32>) -> tensor<64x128xi32> { + %0 = tensor.empty() : tensor<64x128xi32> + // CHECK: %[[EMPTY:.*]] = "ttnn.empty"{{.*}} -> tensor<64x128xi32, {{.*}} + %1 = "ttir.bitwise_xor"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<64x128xi32>, tensor<64x128xi32>, tensor<64x128xi32>) -> tensor<64x128xi32> + // CHECK: {{.*}} "ttnn.bitwise_xor"({{.*}}, {{.*}}, %[[EMPTY]]){{.*}} -> tensor<64x128xi32, {{.*}} + return %1 : tensor<64x128xi32> + // CHECK: return {{.*}} tensor<64x128xi32, {{.*}} + } +} diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/bitwise_not/simple_bitwise_not.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/bitwise_not/simple_bitwise_not.mlir new file mode 100644 index 000000000..04a5c1550 --- /dev/null +++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/bitwise_not/simple_bitwise_not.mlir @@ -0,0 +1,12 @@ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s + +module attributes {} { + func.func @bitwise_not(%arg0: tensor<64x128xi32>) -> tensor<64x128xi32> { + %0 = tensor.empty() : tensor<64x128xi32> + // CHECK: %[[EMPTY:.*]] = "ttnn.empty"{{.*}} -> tensor<64x128xi32, {{.*}} + %1 = "ttir.bitwise_not"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xi32>, tensor<64x128xi32>) -> tensor<64x128xi32> + // CHECK: {{.*}} "ttnn.bitwise_not"({{.*}}, %[[EMPTY]]){{.*}} -> tensor<64x128xi32, {{.*}} + return %1 : tensor<64x128xi32> + // CHECK: return {{.*}} tensor<64x128xi32, {{.*}} + } +} diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_bitwise_binary.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_bitwise_binary.mlir new file mode 100644 index 000000000..6f3fdf240 --- /dev/null +++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_bitwise_binary.mlir @@ -0,0 +1,34 @@ +// RUN: 
ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir +// RUN: FileCheck %s --input-file=%t.mlir +// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn +// TODO(kmitrovic): Enable when binary bitwise ops are supported and merge with unary into one file. +// UNSUPPORTED: true + +module attributes {} { + func.func @bitwise_and(%arg0: tensor<64x128xi32>, %arg1: tensor<64x128xi32>) -> tensor<64x128xi32> { + %0 = tensor.empty() : tensor<64x128xi32> + // CHECK: %[[EMPTY:.*]] = "ttnn.empty"{{.*}} -> tensor<64x128xi32, {{.*}} + %1 = "ttir.bitwise_and"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<64x128xi32>, tensor<64x128xi32>, tensor<64x128xi32>) -> tensor<64x128xi32> + // CHECK: {{.*}} "ttnn.bitwise_and"({{.*}}, {{.*}}, %[[EMPTY]]){{.*}} -> tensor<64x128xi32, {{.*}} + return %1 : tensor<64x128xi32> + // CHECK: return {{.*}} tensor<64x128xi32, {{.*}} + } + + func.func @bitwise_or(%arg0: tensor<64x128xi32>, %arg1: tensor<64x128xi32>) -> tensor<64x128xi32> { + %0 = tensor.empty() : tensor<64x128xi32> + // CHECK: %[[EMPTY:.*]] = "ttnn.empty"{{.*}} -> tensor<64x128xi32, {{.*}} + %1 = "ttir.bitwise_or"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<64x128xi32>, tensor<64x128xi32>, tensor<64x128xi32>) -> tensor<64x128xi32> + // CHECK: {{.*}} "ttnn.bitwise_or"({{.*}}, {{.*}}, %[[EMPTY]]){{.*}} -> tensor<64x128xi32, {{.*}} + return %1 : tensor<64x128xi32> + // CHECK: return {{.*}} tensor<64x128xi32, {{.*}} + } + + func.func @bitwise_xor(%arg0: tensor<64x128xi32>, %arg1: tensor<64x128xi32>) -> tensor<64x128xi32> { + %0 = tensor.empty() : tensor<64x128xi32> + // CHECK: %[[EMPTY:.*]] = "ttnn.empty"{{.*}} -> tensor<64x128xi32, {{.*}} + %1 = "ttir.bitwise_xor"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<64x128xi32>, tensor<64x128xi32>, tensor<64x128xi32>) -> tensor<64x128xi32> + // CHECK: {{.*}} "ttnn.bitwise_xor"({{.*}}, {{.*}}, %[[EMPTY]]){{.*}} -> tensor<64x128xi32, {{.*}} + return %1 : tensor<64x128xi32> + // CHECK: return {{.*}} tensor<64x128xi32, {{.*}} + } +} diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_bitwise_unary.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_bitwise_unary.mlir new file mode 100644 index 000000000..e0f46935a --- /dev/null +++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_bitwise_unary.mlir @@ -0,0 +1,16 @@ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir +// RUN: FileCheck %s --input-file=%t.mlir +// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn +// TODO(kmitrovic): Failing due to https://github.com/tenstorrent/tt-mlir/issues/1571 +// UNSUPPORTED: true + +module attributes {} { + func.func @bitwise_not(%arg0: tensor<64x128xi32>) -> tensor<64x128xi32> { + %0 = tensor.empty() : tensor<64x128xi32> + // CHECK: %[[EMPTY:.*]] = "ttnn.empty"{{.*}} -> tensor<64x128xi32, {{.*}} + %1 = "ttir.bitwise_not"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xi32>, tensor<64x128xi32>) -> tensor<64x128xi32> + // CHECK: {{.*}} "ttnn.bitwise_not"({{.*}}, %[[EMPTY]]){{.*}} -> tensor<64x128xi32, {{.*}} + return %1 : tensor<64x128xi32> + // CHECK: return {{.*}} tensor<64x128xi32, {{.*}} + } +} diff --git a/test/ttmlir/Silicon/TTNN/simple_bitwise_binary.mlir b/test/ttmlir/Silicon/TTNN/simple_bitwise_binary.mlir new file mode 100644 index 000000000..6f3fdf240 --- /dev/null +++ b/test/ttmlir/Silicon/TTNN/simple_bitwise_binary.mlir @@ -0,0 +1,34 @@ +// RUN: ttmlir-opt 
--ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir +// RUN: FileCheck %s --input-file=%t.mlir +// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn +// TODO(kmitrovic): Enable when binary bitwise ops are supported and merge with unary into one file. +// UNSUPPORTED: true + +module attributes {} { + func.func @bitwise_and(%arg0: tensor<64x128xi32>, %arg1: tensor<64x128xi32>) -> tensor<64x128xi32> { + %0 = tensor.empty() : tensor<64x128xi32> + // CHECK: %[[EMPTY:.*]] = "ttnn.empty"{{.*}} -> tensor<64x128xi32, {{.*}} + %1 = "ttir.bitwise_and"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<64x128xi32>, tensor<64x128xi32>, tensor<64x128xi32>) -> tensor<64x128xi32> + // CHECK: {{.*}} "ttnn.bitwise_and"({{.*}}, {{.*}}, %[[EMPTY]]){{.*}} -> tensor<64x128xi32, {{.*}} + return %1 : tensor<64x128xi32> + // CHECK: return {{.*}} tensor<64x128xi32, {{.*}} + } + + func.func @bitwise_or(%arg0: tensor<64x128xi32>, %arg1: tensor<64x128xi32>) -> tensor<64x128xi32> { + %0 = tensor.empty() : tensor<64x128xi32> + // CHECK: %[[EMPTY:.*]] = "ttnn.empty"{{.*}} -> tensor<64x128xi32, {{.*}} + %1 = "ttir.bitwise_or"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<64x128xi32>, tensor<64x128xi32>, tensor<64x128xi32>) -> tensor<64x128xi32> + // CHECK: {{.*}} "ttnn.bitwise_or"({{.*}}, {{.*}}, %[[EMPTY]]){{.*}} -> tensor<64x128xi32, {{.*}} + return %1 : tensor<64x128xi32> + // CHECK: return {{.*}} tensor<64x128xi32, {{.*}} + } + + func.func @bitwise_xor(%arg0: tensor<64x128xi32>, %arg1: tensor<64x128xi32>) -> tensor<64x128xi32> { + %0 = tensor.empty() : tensor<64x128xi32> + // CHECK: %[[EMPTY:.*]] = "ttnn.empty"{{.*}} -> tensor<64x128xi32, {{.*}} + %1 = "ttir.bitwise_xor"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<64x128xi32>, tensor<64x128xi32>, tensor<64x128xi32>) -> tensor<64x128xi32> + // CHECK: {{.*}} "ttnn.bitwise_xor"({{.*}}, {{.*}}, %[[EMPTY]]){{.*}} -> tensor<64x128xi32, {{.*}} + return %1 : tensor<64x128xi32> + // CHECK: return {{.*}} tensor<64x128xi32, {{.*}} + } +} diff --git a/test/ttmlir/Silicon/TTNN/simple_bitwise_unary.mlir b/test/ttmlir/Silicon/TTNN/simple_bitwise_unary.mlir new file mode 100644 index 000000000..e0f46935a --- /dev/null +++ b/test/ttmlir/Silicon/TTNN/simple_bitwise_unary.mlir @@ -0,0 +1,16 @@ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir +// RUN: FileCheck %s --input-file=%t.mlir +// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn +// TODO(kmitrovic): Failing due to https://github.com/tenstorrent/tt-mlir/issues/1571 +// UNSUPPORTED: true + +module attributes {} { + func.func @bitwise_not(%arg0: tensor<64x128xi32>) -> tensor<64x128xi32> { + %0 = tensor.empty() : tensor<64x128xi32> + // CHECK: %[[EMPTY:.*]] = "ttnn.empty"{{.*}} -> tensor<64x128xi32, {{.*}} + %1 = "ttir.bitwise_not"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xi32>, tensor<64x128xi32>) -> tensor<64x128xi32> + // CHECK: {{.*}} "ttnn.bitwise_not"({{.*}}, %[[EMPTY]]){{.*}} -> tensor<64x128xi32, {{.*}} + return %1 : tensor<64x128xi32> + // CHECK: return {{.*}} tensor<64x128xi32, {{.*}} + } +}
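For illustration, a minimal hypothetical StableHLO input (not one of the tests in this change, names invented) showing how the new conversion pattern dispatches on operand element width: boolean (i1) operands keep lowering to the logical ops, while wider integer operands now lower to the bitwise ops added above.

// Sketch only; module, function, and value names are assumptions for this example.
module @bitwise_vs_logical_dispatch {
  func.func public @and_dispatch(%b0: tensor<2x2xi1>, %b1: tensor<2x2xi1>,
                                 %i0: tensor<2x2xi32>, %i1: tensor<2x2xi32>)
      -> (tensor<2x2xi1>, tensor<2x2xi32>) {
    // i1 operands: expected to convert to "ttir.logical_and" under --stablehlo-to-ttir-pipeline
    %0 = stablehlo.and %b0, %b1 : tensor<2x2xi1>
    // i32 operands: expected to convert to "ttir.bitwise_and" under --stablehlo-to-ttir-pipeline
    %1 = stablehlo.and %i0, %i1 : tensor<2x2xi32>
    return %0, %1 : tensor<2x2xi1>, tensor<2x2xi32>
  }
}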