Add conversions for stablehlo ceil, sine and cosine Ops #939

Merged · 7 commits · Oct 30, 2024
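This PR adds end-to-end support for the StableHLO `ceil`, `sine`, and `cosine` ops: new `ceil`/`sin`/`cos` definitions in the TTIR and TTNN dialects, StableHLO→TTIR and TTIR→TTNN conversion patterns, TTNN→EmitC lowering, flatbuffer serialization, runtime dispatch, and lit tests for each conversion plus silicon perf tests. Several existing unary op definitions are also reordered (roughly alphabetically), which accounts for the moved blocks in the diff below.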
54 changes: 37 additions & 17 deletions include/ttmlir/Dialect/TTIR/IR/TTIROps.td
@@ -196,25 +196,24 @@ def TTIR_CbrtOp: TTIR_ElementwiseUnaryOp<"cbrt"> {
     }];
 }

-
-def TTIR_TypecastOp: TTIR_ElementwiseUnaryOp<"typecast"> {
-    let summary = "Eltwise cast op.";
+def TTIR_CeilOp: TTIR_ElementwiseUnaryOp<"ceil"> {
+    let summary = "Eltwise ceil op.";
     let description = [{
-      Eltwise cast operation.
+      Eltwise ceil operation.
     }];
 }

-def TTIR_SqrtOp : TTIR_ElementwiseUnaryOp<"sqrt"> {
-    let summary = "Eltwise square root.";
+def TTIR_CosOp: TTIR_ElementwiseUnaryOp<"cos"> {
+    let summary = "Eltwise cosine op.";
     let description = [{
-      Eltwise square root operation.
+      Eltwise cosine operation.
     }];
 }

-def TTIR_RsqrtOp : TTIR_ElementwiseUnaryOp<"rsqrt"> {
-    let summary = "Eltwise reciprocal square root.";
+def TTIR_LogicalNotOp: TTIR_ElementwiseUnaryOp<"logical_not"> {
+    let summary = "Eltwise logical not op.";
     let description = [{
-      Eltwise reciprocal square root operation.
+      Eltwise logical not operation.
     }];
 }

@@ -225,13 +224,6 @@ def TTIR_NegOp: TTIR_ElementwiseUnaryOp<"neg"> {
     }];
 }

-def TTIR_LogicalNotOp: TTIR_ElementwiseUnaryOp<"logical_not"> {
-    let summary = "Eltwise logical not op.";
-    let description = [{
-      Eltwise logical not operation.
-    }];
-}

 def TTIR_ReciprocalOp : TTIR_ElementwiseUnaryOp<"reciprocal"> {
     let summary = "Eltwise reciprocal.";
     let description = [{
@@ -246,13 +238,41 @@ def TTIR_ReluOp : TTIR_ElementwiseUnaryOp<"relu"> {
     }];
 }

+def TTIR_RsqrtOp : TTIR_ElementwiseUnaryOp<"rsqrt"> {
+    let summary = "Eltwise reciprocal square root.";
+    let description = [{
+      Eltwise reciprocal square root operation.
+    }];
+}
+
+def TTIR_SigmoidOp: TTIR_ElementwiseUnaryOp<"sigmoid"> {
+    let summary = "Eltwise sigmoid.";
+    let description = [{
+      Eltwise sigmoid operation.
+    }];
+}
+
+def TTIR_SinOp: TTIR_ElementwiseUnaryOp<"sin"> {
+    let summary = "Eltwise sine.";
+    let description = [{
+      Eltwise sine operation.
+    }];
+}
+
+def TTIR_SqrtOp : TTIR_ElementwiseUnaryOp<"sqrt"> {
+    let summary = "Eltwise square root.";
+    let description = [{
+      Eltwise square root operation.
+    }];
+}
+
+def TTIR_TypecastOp: TTIR_ElementwiseUnaryOp<"typecast"> {
+    let summary = "Eltwise cast op.";
+    let description = [{
+      Eltwise cast operation.
+    }];
+}

 class TTIR_ElementwiseBinaryOp<string mnemonic, list<Trait> traits = []> :
     TTIR_ElementwiseOp<mnemonic, traits> {
     let summary = "Eltwise binary op.";
51 changes: 36 additions & 15 deletions include/ttmlir/Dialect/TTNN/IR/TTNNOps.td
@@ -176,24 +176,24 @@ def TTNN_CbrtOp : TTNN_ElementwiseUnaryOp<"cbrt"> {
     }];
 }

-def TTNN_SqrtOp : TTNN_ElementwiseUnaryOp<"sqrt"> {
-    let summary = "Eltwise sqrt.";
+def TTNN_CeilOp : TTNN_ElementwiseUnaryOp<"ceil"> {
+    let summary = "Eltwise ceil.";
     let description = [{
-      Eltwise sqrt operation.
+      Eltwise ceil operation.
     }];
 }

-def TTNN_RsqrtOp : TTNN_ElementwiseUnaryOp<"rsqrt"> {
-    let summary = "Eltwise rsqrt.";
+def TTNN_CosOp : TTNN_ElementwiseUnaryOp<"cos"> {
+    let summary = "Eltwise cosine.";
     let description = [{
-      Eltwise rsqrt operation.
+      Eltwise cosine operation.
     }];
 }

-def TTNN_NegOp : TTNN_ElementwiseUnaryOp<"neg"> {
-    let summary = "Eltwise negate.";
+def TTNN_ExpOp : TTNN_ElementwiseUnaryOp<"exp"> {
+    let summary = "Eltwise exponential.";
     let description = [{
-      Eltwise negate operation.
+      Eltwise exponential operation.
     }];
 }

@@ -204,6 +204,13 @@ def TTNN_LogicalNotOp: TTNN_ElementwiseUnaryOp<"logical_not"> {
     }];
 }

+def TTNN_NegOp : TTNN_ElementwiseUnaryOp<"neg"> {
+    let summary = "Eltwise negate.";
+    let description = [{
+      Eltwise negate operation.
+    }];
+}

 def TTNN_ReciprocalOp : TTNN_ElementwiseUnaryOp<"reciprocal"> {
     let summary = "Eltwise reciprocal.";
     let description = [{
@@ -218,17 +225,31 @@ def TTNN_ReluOp : TTNN_ElementwiseUnaryOp<"relu"> {
     }];
 }

-def TTNN_SigmoidOp : TTNN_ElementwiseUnaryOp<"sigmoid"> {
-    let summary = "Eltwise sigmoid.";
+def TTNN_SinOp : TTNN_ElementwiseUnaryOp<"sin"> {
+    let summary = "Eltwise sine.";
     let description = [{
-      Eltwise sigmoid operation.
+      Eltwise sine operation.
     }];
 }

-def TTNN_ExpOp : TTNN_ElementwiseUnaryOp<"exp"> {
-    let summary = "Eltwise exponential.";
+def TTNN_SqrtOp : TTNN_ElementwiseUnaryOp<"sqrt"> {
+    let summary = "Eltwise sqrt.";
     let description = [{
-      Eltwise exponential operation.
+      Eltwise sqrt operation.
     }];
 }
+
+def TTNN_RsqrtOp : TTNN_ElementwiseUnaryOp<"rsqrt"> {
+    let summary = "Eltwise rsqrt.";
+    let description = [{
+      Eltwise rsqrt operation.
+    }];
+}
+
+def TTNN_SigmoidOp : TTNN_ElementwiseUnaryOp<"sigmoid"> {
+    let summary = "Eltwise sigmoid.";
+    let description = [{
+      Eltwise sigmoid operation.
+    }];
+}

3 changes: 3 additions & 0 deletions include/ttmlir/Target/TTNN/program.fbs
@@ -83,6 +83,9 @@ enum EltwiseOpType: uint32 {
   LogicalNot = 22,
   Cbrt = 23,
   Minimum = 24,
+  Ceil = 25,
+  Sin = 26,
+  Cos = 27
 }

 table EltwiseOp {
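Note that these enum values are consumed in two places added below — the `std::is_same_v` dispatch in `TTNNToFlatbuffer.cpp` that serializes each op, and the runtime `switch` in `runtime/lib/ttnn/operations/eltwise/unary.cpp` that executes it — so all three files must stay in sync when further eltwise ops are added.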
6 changes: 6 additions & 0 deletions lib/Conversion/StableHLOToTTIR/StableHLOToTTIRPatterns.cpp
@@ -863,12 +863,18 @@ void addElementwiseUnaryOpsConversionPatterns(MLIRContext *ctx,
   patterns.add<StableHLOToTTIROpDefaultConversionPattern<
       mlir::stablehlo::ConvertOp, mlir::tt::ttir::TypecastOp>>(typeConverter,
                                                                ctx);
+  patterns.add<StableHLOToTTIROpDefaultConversionPattern<
+      mlir::stablehlo::CeilOp, mlir::tt::ttir::CeilOp>>(typeConverter, ctx);
+  patterns.add<StableHLOToTTIROpDefaultConversionPattern<
+      mlir::stablehlo::CosineOp, mlir::tt::ttir::CosOp>>(typeConverter, ctx);
   patterns.add<StableHLOToTTIROpDefaultConversionPattern<
       mlir::stablehlo::ExpOp, mlir::tt::ttir::ExpOp>>(typeConverter, ctx);
   patterns.add<StableHLOToTTIROpDefaultConversionPattern<
       mlir::stablehlo::NegOp, mlir::tt::ttir::NegOp>>(typeConverter, ctx);
   patterns.add<StableHLOToTTIROpDefaultConversionPattern<
       mlir::stablehlo::RsqrtOp, mlir::tt::ttir::RsqrtOp>>(typeConverter, ctx);
+  patterns.add<StableHLOToTTIROpDefaultConversionPattern<
+      mlir::stablehlo::SineOp, mlir::tt::ttir::SinOp>>(typeConverter, ctx);
   patterns.add<StableHLOToTTIROpDefaultConversionPattern<
       mlir::stablehlo::SqrtOp, mlir::tt::ttir::SqrtOp>>(typeConverter, ctx);
 }
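Each registration above instantiates the same generic 1:1 rewrite. For orientation, a minimal sketch of what such a default pattern does — assuming the usual MLIR dialect-conversion structure; the real class is defined earlier in this file and may differ in details such as the `operand_constraints` attribute — is:

// Sketch only (assumed structure, not the actual implementation): replace a
// one-result StableHLO op with its TTIR counterpart, giving the TTIR op a
// destination tensor to write into.
template <typename SrcOp, typename DestOp>
class StableHLOToTTIROpDefaultConversionPattern
    : public OpConversionPattern<SrcOp> {
public:
  using OpConversionPattern<SrcOp>::OpConversionPattern;

  LogicalResult
  matchAndRewrite(SrcOp srcOp,
                  typename OpConversionPattern<SrcOp>::OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    // Convert the result type through the registered type converter.
    auto outputType = mlir::cast<RankedTensorType>(
        this->getTypeConverter()->convertType(srcOp.getResult().getType()));
    // Materialize the empty output tensor the TTIR eltwise op writes into.
    auto outputTensor = rewriter.create<tensor::EmptyOp>(
        srcOp.getLoc(), outputType.getShape(), outputType.getElementType());
    // E.g. stablehlo.ceil -> ttir.ceil over the converted operand.
    rewriter.replaceOpWithNewOp<DestOp>(srcOp, outputType,
                                        adaptor.getOperands()[0],
                                        outputTensor.getResult());
    return success();
  }
};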
3 changes: 3 additions & 0 deletions lib/Conversion/TTIRToTTNN/TTIRToTTNN.cpp
@@ -829,6 +829,9 @@ void populateTTIRToTTNNPatterns(MLIRContext *ctx, RewritePatternSet &patterns,
            ElementwiseOpConversionPattern<ttir::ReciprocalOp, ttnn::ReciprocalOp>,
            ElementwiseOpConversionPattern<ttir::ExpOp, ttnn::ExpOp>,
            ElementwiseOpConversionPattern<ttir::DivOp, ttnn::DivOp>,
+           ElementwiseOpConversionPattern<ttir::CeilOp, ttnn::CeilOp>,
+           ElementwiseOpConversionPattern<ttir::SinOp, ttnn::SinOp>,
+           ElementwiseOpConversionPattern<ttir::CosOp, ttnn::CosOp>,
            ReductionOpConversionPattern<ttir::SumOp, ttnn::SumOp>,
            ReductionOpConversionPattern<ttir::MeanOp, ttnn::MeanOp>,
            ReductionOpConversionPattern<ttir::MaxOp, ttnn::MaxOp>,
5 changes: 4 additions & 1 deletion lib/Conversion/TTNNToEmitC/TTNNToEmitC.cpp
@@ -623,7 +623,10 @@ void populateTTNNToEmitCPatterns(mlir::MLIRContext *ctx,
            DefaultOpConversionPattern<ttnn::RsqrtOp>,
            DefaultOpConversionPattern<ttnn::SigmoidOp>,
            DefaultOpConversionPattern<ttnn::ReciprocalOp>,
-           DefaultOpConversionPattern<ttnn::ExpOp>>(typeConverter, ctx);
+           DefaultOpConversionPattern<ttnn::ExpOp>,
+           DefaultOpConversionPattern<ttnn::CeilOp>,
+           DefaultOpConversionPattern<ttnn::SinOp>,
+           DefaultOpConversionPattern<ttnn::CosOp>>(typeConverter, ctx);

   // Eltwise binary ops
   //
15 changes: 15 additions & 0 deletions lib/Target/TTNN/TTNNToFlatbuffer.cpp
@@ -341,6 +341,12 @@ createEltwiseOp(FlatbufferObjectCache &cache, EltwiseOp op) {
     type = ::tt::target::ttnn::EltwiseOpType::Sigmoid;
   } else if constexpr (std::is_same_v<EltwiseOp, ExpOp>) {
     type = ::tt::target::ttnn::EltwiseOpType::Exp;
+  } else if constexpr (std::is_same_v<EltwiseOp, CeilOp>) {
+    type = ::tt::target::ttnn::EltwiseOpType::Ceil;
+  } else if constexpr (std::is_same_v<EltwiseOp, CosOp>) {
+    type = ::tt::target::ttnn::EltwiseOpType::Cos;
+  } else if constexpr (std::is_same_v<EltwiseOp, SinOp>) {
+    type = ::tt::target::ttnn::EltwiseOpType::Sin;
   } else {
     llvm_unreachable("unhandled EltwiseOp");
   }
@@ -646,6 +652,15 @@ emitTTNNOperation(FlatbufferObjectCache &cache, Operation *op,
     return createOperation(cache, createDeallocOp(cache, deallocOp),
                            debugString);
   }
+  if (auto ceilOp = dyn_cast<CeilOp>(op); ceilOp) {
+    return createOperation(cache, createEltwiseOp(cache, ceilOp), debugString);
+  }
+  if (auto cosOp = dyn_cast<CosOp>(op); cosOp) {
+    return createOperation(cache, createEltwiseOp(cache, cosOp), debugString);
+  }
+  if (auto sinOp = dyn_cast<SinOp>(op); sinOp) {
+    return createOperation(cache, createEltwiseOp(cache, sinOp), debugString);
+  }

   llvm_unreachable("unhandled op in emitTTNNOperation");
 }
12 changes: 12 additions & 0 deletions runtime/lib/ttnn/operations/eltwise/unary.cpp
@@ -84,6 +84,14 @@ void run(const ::tt::target::ttnn::EltwiseOp *op, ProgramContext &context) {
     runEltwiseUnaryCompositeOP(op, tensorPool, ::ttnn::cbrt);
     break;
   }
+  case ::tt::target::ttnn::EltwiseOpType::Ceil: {
+    runEltwiseUnaryOP(op, tensorPool, ::ttnn::ceil);
+    break;
+  }
+  case ::tt::target::ttnn::EltwiseOpType::Cos: {
+    runEltwiseUnaryOP(op, tensorPool, ::ttnn::cos);
+    break;
+  }
   case ::tt::target::ttnn::EltwiseOpType::LogicalNot: {
     runEltwiseUnaryOP(op, tensorPool, ::ttnn::logical_not);
     break;
@@ -108,6 +116,10 @@ void run(const ::tt::target::ttnn::EltwiseOp *op, ProgramContext &context) {
     runEltwiseUnaryOP(op, tensorPool, ::ttnn::sigmoid);
     break;
   }
+  case ::tt::target::ttnn::EltwiseOpType::Sin: {
+    runEltwiseUnaryOP(op, tensorPool, ::ttnn::sin);
+    break;
+  }
   case ::tt::target::ttnn::EltwiseOpType::Reciprocal: {
     runEltwiseUnaryOP(op, tensorPool, ::ttnn::reciprocal);
     break;
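`runEltwiseUnaryOP` is an existing helper defined near the top of `unary.cpp`; roughly, it looks up the input tensor, invokes the given ttnn callable, and registers the result. A sketch of that shape — the flatbuffer accessors (`ins()`, `out()`, `global_id()`) and pool methods below are illustrative assumptions, not the real code:

// Illustrative sketch only; field and method names are assumptions.
template <typename TTNNUnaryOp>
static void runEltwiseUnaryOP(const ::tt::target::ttnn::EltwiseOp *op,
                              ProgramTensorPool &tensorPool,
                              TTNNUnaryOp &&ttnnOp) {
  // Fetch the single input tensor produced by an earlier op in the program.
  const ::ttnn::Tensor &in = tensorPool.at(op->ins()->Get(0)->global_id());
  // Run the ttnn unary kernel (::ttnn::ceil, ::ttnn::sin, ::ttnn::cos, ...).
  ::ttnn::Tensor out = ttnnOp(in);
  // Publish the result under the output's global id for downstream ops.
  tensorPool.insert_or_assign(op->out()->global_id(), out);
}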
10 changes: 10 additions & 0 deletions test/ttmlir/Conversion/StableHLOToTTIR/unary/ceil_op.mlir
@@ -0,0 +1,10 @@
// REQUIRES: stablehlo
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s
module @jit_eltwise_ceil attributes {} {
  func.func public @test_ceil(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> {
    %0 = stablehlo.ceil %arg0 : tensor<13x21x3xf32>
    // CHECK: [[VAL0:%[0-9]+]] = tensor.empty() : [[TENSOR_SIZE:tensor<[0-9]+x[0-9]+x[0-9]+xf[0-9]+>]]
    // CHECK: [[VAL1:%[0-9]+]] = "ttir.ceil"(%arg0, [[VAL0]]) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device_tile, #any_device_tile]}> : ([[TENSOR_SIZE]], [[TENSOR_SIZE]]) -> [[TENSOR_SIZE]]
    return %0 : tensor<13x21x3xf32>
  }
}
10 changes: 10 additions & 0 deletions test/ttmlir/Conversion/StableHLOToTTIR/unary/cosine_op.mlir
@@ -0,0 +1,10 @@
// REQUIRES: stablehlo
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s
module @jit_eltwise_cosine attributes {} {
  func.func public @test_cosine(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> {
    %0 = stablehlo.cosine %arg0 : tensor<13x21x3xf32>
    // CHECK: [[VAL0:%[0-9]+]] = tensor.empty() : [[TENSOR_SIZE:tensor<[0-9]+x[0-9]+x[0-9]+xf[0-9]+>]]
    // CHECK: [[VAL1:%[0-9]+]] = "ttir.cos"(%arg0, [[VAL0]]) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device_tile, #any_device_tile]}> : ([[TENSOR_SIZE]], [[TENSOR_SIZE]]) -> [[TENSOR_SIZE]]
    return %0 : tensor<13x21x3xf32>
  }
}
10 changes: 10 additions & 0 deletions test/ttmlir/Conversion/StableHLOToTTIR/unary/sine_op.mlir
@@ -0,0 +1,10 @@
// REQUIRES: stablehlo
// RUN: ttmlir-opt --stablehlo-to-ttir-pipeline %s | FileCheck %s
module @jit_eltwise_sine attributes {} {
  func.func public @test_sine(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> {
    %0 = stablehlo.sine %arg0 : tensor<13x21x3xf32>
    // CHECK: [[VAL0:%[0-9]+]] = tensor.empty() : [[TENSOR_SIZE:tensor<[0-9]+x[0-9]+x[0-9]+xf[0-9]+>]]
    // CHECK: [[VAL1:%[0-9]+]] = "ttir.sin"(%arg0, [[VAL0]]) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device_tile, #any_device_tile]}> : ([[TENSOR_SIZE]], [[TENSOR_SIZE]]) -> [[TENSOR_SIZE]]
    return %0 : tensor<13x21x3xf32>
  }
}
11 changes: 11 additions & 0 deletions test/ttmlir/Dialect/TTNN/eltwise/unary/ceil/simple_ceil.mlir
@@ -0,0 +1,11 @@
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
module attributes {} {
  func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
    // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
    %0 = tensor.empty() : tensor<64x128xf32>
    // CHECK: %[[C:.*]] = "ttnn.ceil"[[C:.*]]
    %1 = "ttir.ceil"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
    return %1 : tensor<64x128xf32>
  }
}
11 changes: 11 additions & 0 deletions test/ttmlir/Dialect/TTNN/eltwise/unary/cos/simple_cos.mlir
@@ -0,0 +1,11 @@
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
module attributes {} {
  func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
    // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
    %0 = tensor.empty() : tensor<64x128xf32>
    // CHECK: %[[C:.*]] = "ttnn.cos"[[C:.*]]
    %1 = "ttir.cos"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
    return %1 : tensor<64x128xf32>
  }
}
11 changes: 11 additions & 0 deletions test/ttmlir/Dialect/TTNN/eltwise/unary/sin/simple_sin.mlir
@@ -0,0 +1,11 @@
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s
#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
module attributes {} {
  func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
    // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
    %0 = tensor.empty() : tensor<64x128xf32>
    // CHECK: %[[C:.*]] = "ttnn.sin"[[C:.*]]
    %1 = "ttir.sin"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
    return %1 : tensor<64x128xf32>
  }
}
13 changes: 13 additions & 0 deletions test/ttmlir/Silicon/TTNN/perf_unit/test_perf_ceil.mlir
@@ -0,0 +1,13 @@
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
// RUN: FileCheck %s --input-file=%t.mlir
// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
#any_device_tile = #tt.operand_constraint<dram|l1|tile|any_device_tile>

func.func @ceil(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
  // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
  %0 = tensor.empty() : tensor<64x128xf32>
  // CHECK: %[[C:.*]] = "ttnn.ceil"[[C:.*]]
  %1 = "ttir.ceil"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
  return %1 : tensor<64x128xf32>
}
13 changes: 13 additions & 0 deletions test/ttmlir/Silicon/TTNN/perf_unit/test_perf_cosine.mlir
@@ -0,0 +1,13 @@
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
// RUN: FileCheck %s --input-file=%t.mlir
// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
#any_device_tile = #tt.operand_constraint<dram|l1|tile|any_device_tile>

func.func @cosine(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
  // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
  %0 = tensor.empty() : tensor<64x128xf32>
  // CHECK: %[[C:.*]] = "ttnn.cos"[[C:.*]]
  %1 = "ttir.cos"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
  return %1 : tensor<64x128xf32>
}
13 changes: 13 additions & 0 deletions test/ttmlir/Silicon/TTNN/perf_unit/test_perf_sine.mlir
@@ -0,0 +1,13 @@
// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
// RUN: FileCheck %s --input-file=%t.mlir
// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
#any_device = #tt.operand_constraint<dram|l1|scalar|tile|any_device|any_device_tile>
#any_device_tile = #tt.operand_constraint<dram|l1|tile|any_device_tile>

func.func @sine(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
  // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
  %0 = tensor.empty() : tensor<64x128xf32>
  // CHECK: %[[C:.*]] = "ttnn.sin"[[C:.*]]
  %1 = "ttir.sin"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>, operand_constraints = [#any_device, #any_device]}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
  return %1 : tensor<64x128xf32>
}