diff --git a/test/ttmlir/Conversion/StableHLOToTTIR/reduce_add_op.mlir b/test/ttmlir/Conversion/StableHLOToTTIR/reduce_add_op.mlir
index 66f3ce4e1..b564bfd5b 100644
--- a/test/ttmlir/Conversion/StableHLOToTTIR/reduce_add_op.mlir
+++ b/test/ttmlir/Conversion/StableHLOToTTIR/reduce_add_op.mlir
@@ -4,7 +4,7 @@ module @jit_reduce_add attributes {} {
   func.func public @test_reduce_add_4to3dim(%arg0: tensor<128x10x32x4xf32>, %cst_0: tensor<f32>) -> tensor<128x32x4xf32> {
     // CHECK: tensor.empty
     // CHECK: "ttir.sum"
-    // CHECK-SAME: dim_arg = [1 : i32]
+    // CHECK-SAME: dim = [1 : i32]
     // CHECK-SAME: keep_dim = false
     // CHECK-SAME: tensor<128x10x32x4xf32>
     // CHECK-SAME: -> tensor<128x32x4xf32>
@@ -15,7 +15,7 @@ module @jit_reduce_add attributes {} {
   func.func public @test_reduce_add_4to2dim(%arg0: tensor<128x10x32x4xf32>, %cst_0: tensor<f32>) -> tensor<128x32xf32> {
     // CHECK: tensor.empty
     // CHECK: "ttir.sum"
-    // CHECK-SAME: dim_arg = [1 : i32, 3 : i32]
+    // CHECK-SAME: dim = [1 : i32, 3 : i32]
     // CHECK-SAME: keep_dim = false
     // CHECK-SAME: tensor<128x10x32x4xf32>
     // CHECK-SAME: -> tensor<128x32xf32>
@@ -26,7 +26,7 @@ module @jit_reduce_add attributes {} {
   func.func public @test_reduce_add_4to1dim(%arg0: tensor<128x10x32x4xf32>, %cst_0: tensor<f32>) -> tensor<128xf32> {
     // CHECK: tensor.empty
     // CHECK: "ttir.sum"
-    // CHECK-SAME: dim_arg = [1 : i32, 2 : i32, 3 : i32]
+    // CHECK-SAME: dim = [1 : i32, 2 : i32, 3 : i32]
     // CHECK-SAME: keep_dim = false
     // CHECK-SAME: tensor<128x10x32x4xf32>
     // CHECK-SAME: -> tensor<128xf32>
@@ -37,7 +37,7 @@ module @jit_reduce_add attributes {} {
   func.func public @test_reduce_add_4to0dim(%arg0: tensor<128x10x32x4xf32>, %cst_0: tensor<f32>) -> tensor<f32> {
     // CHECK: tensor.empty
     // CHECK: "ttir.sum"
-    // CHECK-SAME: dim_arg = [0 : i32, 1 : i32, 2 : i32, 3 : i32]
+    // CHECK-SAME: dim = [0 : i32, 1 : i32, 2 : i32, 3 : i32]
     // CHECK-SAME: keep_dim = false
     // CHECK-SAME: tensor<128x10x32x4xf32>
     // CHECK-SAME: -> tensor<1xf32>
@@ -48,7 +48,7 @@ module @jit_reduce_add attributes {} {
   func.func public @test_reduce_add_3to2dim(%arg0: tensor<128x10x4xf32>, %cst_0: tensor<f32>) -> tensor<128x4xf32> {
     // CHECK: tensor.empty
     // CHECK: "ttir.sum"
-    // CHECK-SAME: dim_arg = [1 : i32]
+    // CHECK-SAME: dim = [1 : i32]
     // CHECK-SAME: keep_dim = false
     // CHECK-SAME: tensor<128x10x4xf32>
     // CHECK-SAME: -> tensor<128x4xf32>
@@ -59,7 +59,7 @@ module @jit_reduce_add attributes {} {
   func.func public @test_reduce_add_3to1dim(%arg0: tensor<128x10x4xf32>, %cst_0: tensor<f32>) -> tensor<128xf32> {
     // CHECK: tensor.empty
     // CHECK: "ttir.sum"
-    // CHECK-SAME: dim_arg = [1 : i32, 2 : i32]
+    // CHECK-SAME: dim = [1 : i32, 2 : i32]
     // CHECK-SAME: keep_dim = false
     // CHECK-SAME: tensor<128x10x4xf32>
     // CHECK-SAME: -> tensor<128xf32>
@@ -70,7 +70,7 @@ module @jit_reduce_add attributes {} {
   func.func public @test_reduce_add_3to0dim(%arg0: tensor<128x10x4xf32>, %cst_0: tensor<f32>) -> tensor<f32> {
     // CHECK: tensor.empty
     // CHECK: "ttir.sum"
-    // CHECK-SAME: dim_arg = [0 : i32, 1 : i32, 2 : i32]
+    // CHECK-SAME: dim = [0 : i32, 1 : i32, 2 : i32]
     // CHECK-SAME: keep_dim = false
     // CHECK-SAME: tensor<128x10x4xf32>
     // CHECK-SAME: -> tensor<1xf32>
@@ -81,7 +81,7 @@ module @jit_reduce_add attributes {} {
   func.func public @test_reduce_add_2to1dim(%arg0: tensor<128x10xf32>, %cst_0: tensor<f32>) -> tensor<128xf32> {
     // CHECK: tensor.empty
     // CHECK: "ttir.sum"
-    // CHECK-SAME: dim_arg = [1 : i32]
+    // CHECK-SAME: dim = [1 : i32]
     // CHECK-SAME: keep_dim = false
     // CHECK-SAME: tensor<128x10xf32>
     // CHECK-SAME: -> tensor<128xf32>
@@ -92,7 +92,7 @@ module @jit_reduce_add attributes {} {
   func.func public @test_reduce_add_2to0dim(%arg0: tensor<128x10xf32>, %cst_0: tensor<f32>) -> tensor<f32> {
     // CHECK: tensor.empty
     // CHECK: "ttir.sum"
-    // CHECK-SAME: dim_arg = [0 : i32, 1 : i32]
+    // CHECK-SAME: dim = [0 : i32, 1 : i32]
     // CHECK-SAME: keep_dim = false
     // CHECK-SAME: tensor<128x10xf32>
     // CHECK-SAME: -> tensor<1xf32>
@@ -103,7 +103,7 @@ module @jit_reduce_add attributes {} {
   func.func public @test_reduce_add_1to0dim(%arg0: tensor<128xf32>, %cst_0: tensor<f32>) -> tensor<f32> {
     // CHECK: tensor.empty
     // CHECK: "ttir.sum"
-    // CHECK-SAME: dim_arg = [0 : i32]
+    // CHECK-SAME: dim = [0 : i32]
     // CHECK-SAME: keep_dim = false
     // CHECK-SAME: tensor<128xf32>
     // CHECK-SAME: -> tensor<1xf32>
diff --git a/test/ttmlir/Conversion/StableHLOToTTIR/reduce_maximum_op.mlir b/test/ttmlir/Conversion/StableHLOToTTIR/reduce_maximum_op.mlir
index 81fb59bfb..21226ab1c 100644
--- a/test/ttmlir/Conversion/StableHLOToTTIR/reduce_maximum_op.mlir
+++ b/test/ttmlir/Conversion/StableHLOToTTIR/reduce_maximum_op.mlir
@@ -4,7 +4,7 @@ module @jit_reduce_maximum attributes {} {
   func.func public @test_reduce_maximum_4to3dim(%arg0: tensor<128x10x32x4xf32>, %cst_0: tensor<f32>) -> tensor<128x32x4xf32> {
     // CHECK: tensor.empty
     // CHECK: "ttir.max"
-    // CHECK-SAME: dim_arg = [1 : i32]
+    // CHECK-SAME: dim = [1 : i32]
     // CHECK-SAME: keep_dim = false
     // CHECK-SAME: tensor<128x10x32x4xf32>
     // CHECK-SAME: -> tensor<128x32x4xf32>
@@ -15,7 +15,7 @@ module @jit_reduce_maximum attributes {} {
   func.func public @test_reduce_maximum_4to2dim(%arg0: tensor<128x10x32x4xf32>, %cst_0: tensor<f32>) -> tensor<128x32xf32> {
     // CHECK: tensor.empty
     // CHECK: "ttir.max"
-    // CHECK-SAME: dim_arg = [1 : i32, 3 : i32]
+    // CHECK-SAME: dim = [1 : i32, 3 : i32]
     // CHECK-SAME: keep_dim = false
     // CHECK-SAME: tensor<128x10x32x4xf32>
     // CHECK-SAME: -> tensor<128x32xf32>
@@ -26,7 +26,7 @@ module @jit_reduce_maximum attributes {} {
   func.func public @test_reduce_maximum_4to1dim(%arg0: tensor<128x10x32x4xf32>, %cst_0: tensor<f32>) -> tensor<128xf32> {
     // CHECK: tensor.empty
     // CHECK: "ttir.max"
-    // CHECK-SAME: dim_arg = [1 : i32, 2 : i32, 3 : i32]
+    // CHECK-SAME: dim = [1 : i32, 2 : i32, 3 : i32]
     // CHECK-SAME: keep_dim = false
     // CHECK-SAME: tensor<128x10x32x4xf32>
     // CHECK-SAME: -> tensor<128xf32>
@@ -37,7 +37,7 @@ module @jit_reduce_maximum attributes {} {
   func.func public @test_reduce_maximum_4to0dim(%arg0: tensor<128x10x32x4xf32>, %cst_0: tensor<f32>) -> tensor<f32> {
     // CHECK: tensor.empty
     // CHECK: "ttir.max"
-    // CHECK-SAME: dim_arg = [0 : i32, 1 : i32, 2 : i32, 3 : i32]
+    // CHECK-SAME: dim = [0 : i32, 1 : i32, 2 : i32, 3 : i32]
     // CHECK-SAME: keep_dim = false
     // CHECK-SAME: tensor<128x10x32x4xf32>
     // CHECK-SAME: -> tensor<1xf32>
@@ -48,7 +48,7 @@ module @jit_reduce_maximum attributes {} {
   func.func public @test_reduce_maximum_3to2dim(%arg0: tensor<128x10x4xf32>, %cst_0: tensor<f32>) -> tensor<128x4xf32> {
     // CHECK: tensor.empty
     // CHECK: "ttir.max"
-    // CHECK-SAME: dim_arg = [1 : i32]
+    // CHECK-SAME: dim = [1 : i32]
     // CHECK-SAME: keep_dim = false
     // CHECK-SAME: tensor<128x10x4xf32>
     // CHECK-SAME: -> tensor<128x4xf32>
@@ -59,7 +59,7 @@ module @jit_reduce_maximum attributes {} {
   func.func public @test_reduce_maximum_3to1dim(%arg0: tensor<128x10x4xf32>, %cst_0: tensor<f32>) -> tensor<128xf32> {
     // CHECK: tensor.empty
     // CHECK: "ttir.max"
-    // CHECK-SAME: dim_arg = [1 : i32, 2 : i32]
+    // CHECK-SAME: dim = [1 : i32, 2 : i32]
     // CHECK-SAME: keep_dim = false
     // CHECK-SAME: tensor<128x10x4xf32>
     // CHECK-SAME: -> tensor<128xf32>
@@ -70,7 +70,7 @@ module @jit_reduce_maximum attributes {} {
   func.func public @test_reduce_maximum_3to0dim(%arg0: tensor<128x10x4xf32>, %cst_0: tensor<f32>) -> tensor<f32> {
     // CHECK: tensor.empty
     // CHECK: "ttir.max"
-    // CHECK-SAME: dim_arg = [0 : i32, 1 : i32, 2 : i32]
+    // CHECK-SAME: dim = [0 : i32, 1 : i32, 2 : i32]
     // CHECK-SAME: keep_dim = false
     // CHECK-SAME: tensor<128x10x4xf32>
     // CHECK-SAME: -> tensor<1xf32>
@@ -81,7 +81,7 @@ module @jit_reduce_maximum attributes {} {
   func.func public @test_reduce_maximum_2to1dim(%arg0: tensor<128x10xf32>, %cst_0: tensor<f32>) -> tensor<128xf32> {
     // CHECK: tensor.empty
     // CHECK: "ttir.max"
-    // CHECK-SAME: dim_arg = [1 : i32]
+    // CHECK-SAME: dim = [1 : i32]
     // CHECK-SAME: keep_dim = false
     // CHECK-SAME: tensor<128x10xf32>
     // CHECK-SAME: -> tensor<128xf32>
@@ -92,7 +92,7 @@ module @jit_reduce_maximum attributes {} {
   func.func public @test_reduce_maximum_2to0dim(%arg0: tensor<128x10xf32>, %cst_0: tensor<f32>) -> tensor<f32> {
     // CHECK: tensor.empty
     // CHECK: "ttir.max"
-    // CHECK-SAME: dim_arg = [0 : i32, 1 : i32]
+    // CHECK-SAME: dim = [0 : i32, 1 : i32]
     // CHECK-SAME: keep_dim = false
     // CHECK-SAME: tensor<128x10xf32>
     // CHECK-SAME: -> tensor<1xf32>
@@ -103,7 +103,7 @@ module @jit_reduce_maximum attributes {} {
   func.func public @test_reduce_maximum_1to0dim(%arg0: tensor<128xf32>, %cst_0: tensor<f32>) -> tensor<f32> {
     // CHECK: tensor.empty
     // CHECK: "ttir.max"
-    // CHECK-SAME: dim_arg = [0 : i32]
+    // CHECK-SAME: dim = [0 : i32]
     // CHECK-SAME: keep_dim = false
     // CHECK-SAME: tensor<128xf32>
     // CHECK-SAME: -> tensor<1xf32>
diff --git a/test/ttmlir/Dialect/TTNN/simple_mean.mlir b/test/ttmlir/Dialect/TTNN/simple_mean.mlir
index efcba0a13..c968b47cc 100644
--- a/test/ttmlir/Dialect/TTNN/simple_mean.mlir
+++ b/test/ttmlir/Dialect/TTNN/simple_mean.mlir
@@ -3,7 +3,7 @@ module {
   func.func @forward(%arg0: tensor<512x1024xbf16>) -> tensor<512x32xbf16> {
     %0 = tensor.empty() : tensor<512x32xbf16>
     // CHECK: %[[C:.*]] = "ttnn.mean"[[C:.*]]
-    %1 = "ttir.mean"(%arg0, %0) <{dim_arg = [-1: i32], keep_dim = true}> : (tensor<512x1024xbf16>, tensor<512x32xbf16>) -> tensor<512x32xbf16>
+    %1 = "ttir.mean"(%arg0, %0) <{dim = [-1: i32], keep_dim = true}> : (tensor<512x1024xbf16>, tensor<512x32xbf16>) -> tensor<512x32xbf16>
     return %1 : tensor<512x32xbf16>
   }
 }
diff --git a/test/ttmlir/Dialect/TTNN/simple_sum.mlir b/test/ttmlir/Dialect/TTNN/simple_sum.mlir
index 2b107b068..ac5ef8b1b 100644
--- a/test/ttmlir/Dialect/TTNN/simple_sum.mlir
+++ b/test/ttmlir/Dialect/TTNN/simple_sum.mlir
@@ -3,7 +3,7 @@ module attributes {} {
   func.func @forward(%arg0: tensor<512x1024xbf16>) -> tensor<512x32xbf16> {
     %0 = tensor.empty() : tensor<512x32xbf16>
     // CHECK: %[[C:.*]] = "ttnn.sum"[[C:.*]]
-    %1 = "ttir.sum"(%arg0, %0) <{dim_arg = [-1: i32], keep_dim = true}> : (tensor<512x1024xbf16>, tensor<512x32xbf16>) -> tensor<512x32xbf16>
+    %1 = "ttir.sum"(%arg0, %0) <{dim = [-1: i32], keep_dim = true}> : (tensor<512x1024xbf16>, tensor<512x32xbf16>) -> tensor<512x32xbf16>
     return %1 : tensor<512x32xbf16>
   }
 }
diff --git a/test/ttmlir/Silicon/StableHLO/reduce_add_op.mlir b/test/ttmlir/Silicon/StableHLO/reduce_add_op.mlir
index 89f51123e..30a8df951 100644
--- a/test/ttmlir/Silicon/StableHLO/reduce_add_op.mlir
+++ b/test/ttmlir/Silicon/StableHLO/reduce_add_op.mlir
@@ -13,7 +13,7 @@ module @jit_reduce_add attributes {} {
 
   func.func public @test_reduce_add_4to0dim(%arg0: tensor<128x10x32x4xf32>, %cst_0: tensor<f32>) -> tensor<f32> {
     // CHECK: "ttnn.sum"
-    // CHECK-NOT: dim_arg
+    // CHECK-NOT: dim
     // CHECK-SAME: keep_dim = true
     // CHECK-SAME: tensor<128x10x32x4xf32,
     // CHECK-SAME: -> tensor<1x1x1x1xf32,
@@ -27,7 +27,7 @@ module @jit_reduce_add attributes {} {
 
   func.func public @test_reduce_add_3to2dim(%arg0: tensor<128x10x4xf32>, %cst_0: tensor<f32>) -> tensor<128x4xf32> {
     // CHECK: "ttnn.sum"
-    // CHECK-SAME: dim_arg = [1 : i32]
+    // CHECK-SAME: dim = [1 : i32]
     // CHECK-SAME: keep_dim = true
     // CHECK-SAME: tensor<128x10x4xf32,
     // CHECK-SAME: -> tensor<128x1x4xf32,
@@ -41,7 +41,7 @@ module @jit_reduce_add attributes {} {
 
   func.func public @test_reduce_add_3to1dim(%arg0: tensor<128x10x4xf32>, %cst_0: tensor<f32>) -> tensor<128xf32> {
     // CHECK: "ttnn.sum"
-    // CHECK-SAME: dim_arg = [1 : i32, 2 : i32]
+    // CHECK-SAME: dim = [1 : i32, 2 : i32]
     // CHECK-SAME: keep_dim = true
     // CHECK-SAME: tensor<128x10x4xf32,
     // CHECK-SAME: -> tensor<128x1x1xf32,
@@ -55,7 +55,7 @@ module @jit_reduce_add attributes {} {
 
   func.func public @test_reduce_add_3to0dim(%arg0: tensor<128x10x4xf32>, %cst_0: tensor<f32>) -> tensor<f32> {
     // CHECK: "ttnn.sum"
-    // CHECK-NOT: dim_arg
+    // CHECK-NOT: dim
     // CHECK-SAME: keep_dim = true
     // CHECK-SAME: tensor<128x10x4xf32,
     // CHECK-SAME: -> tensor<1x1x1xf32,
@@ -69,7 +69,7 @@ module @jit_reduce_add attributes {} {
 
   func.func public @test_reduce_add_2to1dim(%arg0: tensor<128x10xf32>, %cst_0: tensor<f32>) -> tensor<128xf32> {
     // CHECK: "ttnn.sum"
-    // CHECK-SAME: dim_arg = [1 : i32]
+    // CHECK-SAME: dim = [1 : i32]
     // CHECK-SAME: keep_dim = true
     // CHECK-SAME: tensor<128x10xf32,
     // CHECK-SAME: -> tensor<128x1xf32,
@@ -83,7 +83,7 @@ module @jit_reduce_add attributes {} {
 
   func.func public @test_reduce_add_2to0dim(%arg0: tensor<128x10xf32>, %cst_0: tensor<f32>) -> tensor<f32> {
     // CHECK: "ttnn.sum"
-    // CHECK-NOT: dim_arg
+    // CHECK-NOT: dim
     // CHECK-SAME: keep_dim = true
     // CHECK-SAME: tensor<128x10xf32,
     // CHECK-SAME: -> tensor<1x1xf32,
@@ -97,7 +97,7 @@ module @jit_reduce_add attributes {} {
 
   func.func public @test_reduce_add_1to0dim(%arg0: tensor<128xf32>, %cst_0: tensor<f32>) -> tensor<f32> {
     // CHECK: "ttnn.sum"
-    // CHECK-NOT: dim_arg
+    // CHECK-NOT: dim
     // CHECK-SAME: keep_dim = true
     // CHECK-SAME: tensor<128xf32,
     // CHECK-SAME: -> tensor<1xf32,
diff --git a/test/ttmlir/Silicon/StableHLO/reduce_maximum_op.mlir b/test/ttmlir/Silicon/StableHLO/reduce_maximum_op.mlir
index 8ee57fd52..93da96751 100644
--- a/test/ttmlir/Silicon/StableHLO/reduce_maximum_op.mlir
+++ b/test/ttmlir/Silicon/StableHLO/reduce_maximum_op.mlir
@@ -13,7 +13,7 @@ module @jit_reduce_maximum attributes {} {
 
   func.func public @test_reduce_maximum_4to0dim(%arg0: tensor<128x10x32x4xf32>, %cst_0: tensor<f32>) -> tensor<f32> {
     // CHECK: "ttnn.max"
-    // CHECK-NOT: dim_arg
+    // CHECK-NOT: dim
     // CHECK-SAME: keep_dim = true
     // CHECK-SAME: tensor<128x10x32x4xf32,
     // CHECK-SAME: -> tensor<1x1x1x1xf32,
@@ -27,7 +27,7 @@ module @jit_reduce_maximum attributes {} {
 
   func.func public @test_reduce_maximum_3to2dim(%arg0: tensor<128x10x4xf32>, %cst_0: tensor<f32>) -> tensor<128x4xf32> {
     // CHECK: "ttnn.max"
-    // CHECK-SAME: dim_arg = [1 : i32]
+    // CHECK-SAME: dim = [1 : i32]
     // CHECK-SAME: keep_dim = true
     // CHECK-SAME: tensor<128x10x4xf32,
     // CHECK-SAME: -> tensor<128x1x4xf32,
@@ -41,7 +41,7 @@ module @jit_reduce_maximum attributes {} {
 
   func.func public @test_reduce_maximum_3to1dim(%arg0: tensor<128x10x4xf32>, %cst_0: tensor<f32>) -> tensor<128xf32> {
     // CHECK: "ttnn.max"
-    // CHECK-SAME: dim_arg = [1 : i32, 2 : i32]
+    // CHECK-SAME: dim = [1 : i32, 2 : i32]
     // CHECK-SAME: keep_dim = true
     // CHECK-SAME: tensor<128x10x4xf32,
     // CHECK-SAME: -> tensor<128x1x1xf32,
@@ -55,7 +55,7 @@ module @jit_reduce_maximum attributes {} {
 
   func.func public @test_reduce_maximum_3to0dim(%arg0: tensor<128x10x4xf32>, %cst_0: tensor<f32>) -> tensor<f32> {
     // CHECK: "ttnn.max"
-    // CHECK-NOT: dim_arg
+    // CHECK-NOT: dim
     // CHECK-SAME: keep_dim = true
     // CHECK-SAME: tensor<128x10x4xf32,
     // CHECK-SAME: -> tensor<1x1x1xf32,
@@ -69,7 +69,7 @@ module @jit_reduce_maximum attributes {} {
 
   func.func public @test_reduce_maximum_2to1dim(%arg0: tensor<128x10xf32>, %cst_0: tensor<f32>) -> tensor<128xf32> {
     // CHECK: "ttnn.max"
-    // CHECK-SAME: dim_arg = [1 : i32]
+    // CHECK-SAME: dim = [1 : i32]
     // CHECK-SAME: keep_dim = true
     // CHECK-SAME: tensor<128x10xf32,
     // CHECK-SAME: -> tensor<128x1xf32,
@@ -83,7 +83,7 @@ module @jit_reduce_maximum attributes {} {
 
   func.func public @test_reduce_maximum_2to0dim(%arg0: tensor<128x10xf32>, %cst_0: tensor<f32>) -> tensor<f32> {
     // CHECK: "ttnn.max"
-    // CHECK-NOT: dim_arg
+    // CHECK-NOT: dim
     // CHECK-SAME: keep_dim = true
     // CHECK-SAME: tensor<128x10xf32,
     // CHECK-SAME: -> tensor<1x1xf32,
@@ -97,7 +97,7 @@ module @jit_reduce_maximum attributes {} {
 
   func.func public @test_reduce_maximum_1to0dim(%arg0: tensor<128xf32>, %cst_0: tensor<f32>) -> tensor<f32> {
     // CHECK: "ttnn.max"
-    // CHECK-NOT: dim_arg
+    // CHECK-NOT: dim
     // CHECK-SAME: keep_dim = true
     // CHECK-SAME: tensor<128xf32,
     // CHECK-SAME: -> tensor<1xf32,
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_max.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_max.mlir
index 1011fad89..f13d8cb20 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_max.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_max.mlir
@@ -4,6 +4,6 @@
 func.func @max(%arg0: tensor<1x1x512x64xbf16>) -> tensor<1x1x512xbf16> {
   %0 = tensor.empty() : tensor<1x1x512xbf16>
   // CHECK: %[[C:.*]] = "ttnn.max"[[C:.*]]
-  %1 = "ttir.max"(%arg0, %0) <{dim_arg = [-1: i32], keep_dim = true}> : (tensor<1x1x512x64xbf16>, tensor<1x1x512xbf16>) -> tensor<1x1x512xbf16>
+  %1 = "ttir.max"(%arg0, %0) <{dim = [-1: i32], keep_dim = true}> : (tensor<1x1x512x64xbf16>, tensor<1x1x512xbf16>) -> tensor<1x1x512xbf16>
   return %1 : tensor<1x1x512xbf16>
 }
diff --git a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_sum.mlir b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_sum.mlir
index f0beb34b2..d4cc5fce1 100644
--- a/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_sum.mlir
+++ b/test/ttmlir/Silicon/TTNN/perf_unit/test_perf_sum.mlir
@@ -4,6 +4,6 @@
 func.func @sum(%arg0: tensor<1x1x512x64xbf16>) -> tensor<1x1x512xbf16> {
   %0 = tensor.empty() : tensor<1x1x512xbf16>
   // CHECK: %[[C:.*]] = "ttnn.sum"[[C:.*]]
-  %1 = "ttir.sum"(%arg0, %0) <{dim_arg = [-1: i32], keep_dim = true}> : (tensor<1x1x512x64xbf16>, tensor<1x1x512xbf16>) -> tensor<1x1x512xbf16>
+  %1 = "ttir.sum"(%arg0, %0) <{dim = [-1: i32], keep_dim = true}> : (tensor<1x1x512x64xbf16>, tensor<1x1x512xbf16>) -> tensor<1x1x512xbf16>
   return %1 : tensor<1x1x512xbf16>
 }
diff --git a/test/ttmlir/Silicon/TTNN/simple_max.mlir b/test/ttmlir/Silicon/TTNN/simple_max.mlir
index 8ec3bdc59..94a43ec9f 100644
--- a/test/ttmlir/Silicon/TTNN/simple_max.mlir
+++ b/test/ttmlir/Silicon/TTNN/simple_max.mlir
@@ -13,7 +13,7 @@ module {
   func.func public @reduce_not_keep_dim(%arg0: tensor<128x10xf32>) -> tensor<128xf32> {
     %0 = tensor.empty() : tensor<128xf32>
     // CHECK: "ttnn.max"
-    // CHECK-SAME: dim_arg = [1 : i32]
+    // CHECK-SAME: dim = [1 : i32]
     // CHECK-SAME: keep_dim = true
     // CHECK-SAME: tensor<128x10xf32,
     // CHECK-SAME: -> tensor<128x1xf32,
@@ -21,19 +21,19 @@ module {
     // CHECK-SAME: shape = [128 : i32]
     // CHECK-SAME: tensor<128x1xf32,
     // CHECK-SAME: -> tensor<128xf32,
-    %1 = "ttir.max"(%arg0, %0) <{dim_arg = [1 : i32], keep_dim = false}> : (tensor<128x10xf32>, tensor<128xf32>) -> tensor<128xf32>
+    %1 = "ttir.max"(%arg0, %0) <{dim = [1 : i32], keep_dim = false}> : (tensor<128x10xf32>, tensor<128xf32>) -> tensor<128xf32>
     return %1 : tensor<128xf32>
   }
 
   func.func public @reduce_keep_dim(%arg0: tensor<128x10xf32>) -> tensor<128x1xf32> {
     %0 = tensor.empty() : tensor<128x1xf32>
     // CHECK: "ttnn.max"
-    // CHECK-SAME: dim_arg = [1 : i32]
+    // CHECK-SAME: dim = [1 : i32]
     // CHECK-SAME: keep_dim = true
     // CHECK-SAME: tensor<128x10xf32,
     // CHECK-SAME: -> tensor<128x1xf32,
     // CHECK-NOT: "ttnn.reshape"
-    %1 = "ttir.max"(%arg0, %0) <{dim_arg = [1 : i32], keep_dim = true}> : (tensor<128x10xf32>, tensor<128x1xf32>) -> tensor<128x1xf32>
+    %1 = "ttir.max"(%arg0, %0) <{dim = [1 : i32], keep_dim = true}> : (tensor<128x10xf32>, tensor<128x1xf32>) -> tensor<128x1xf32>
     return %1 : tensor<128x1xf32>
   }
 }
diff --git a/test/ttmlir/Silicon/TTNN/simple_mean.mlir b/test/ttmlir/Silicon/TTNN/simple_mean.mlir
index 8b60dee25..8d6e9ffb9 100644
--- a/test/ttmlir/Silicon/TTNN/simple_mean.mlir
+++ b/test/ttmlir/Silicon/TTNN/simple_mean.mlir
@@ -13,7 +13,7 @@ module {
   func.func public @reduce_not_keep_dim(%arg0: tensor<128x10xf32>) -> tensor<128xf32> {
     %0 = tensor.empty() : tensor<128xf32>
     // CHECK: "ttnn.mean"
-    // CHECK-SAME: dim_arg = [1 : i32]
+    // CHECK-SAME: dim = [1 : i32]
     // CHECK-SAME: keep_dim = true
     // CHECK-SAME: tensor<128x10xf32,
     // CHECK-SAME: -> tensor<128x1xf32,
@@ -21,19 +21,19 @@ module {
     // CHECK-SAME: shape = [128 : i32]
     // CHECK-SAME: tensor<128x1xf32,
     // CHECK-SAME: -> tensor<128xf32,
-    %1 = "ttir.mean"(%arg0, %0) <{dim_arg = [1 : i32], keep_dim = false}> : (tensor<128x10xf32>, tensor<128xf32>) -> tensor<128xf32>
+    %1 = "ttir.mean"(%arg0, %0) <{dim = [1 : i32], keep_dim = false}> : (tensor<128x10xf32>, tensor<128xf32>) -> tensor<128xf32>
     return %1 : tensor<128xf32>
   }
 
   func.func public @reduce_keep_dim(%arg0: tensor<128x10xf32>) -> tensor<128x1xf32> {
     %0 = tensor.empty() : tensor<128x1xf32>
     // CHECK: "ttnn.mean"
-    // CHECK-SAME: dim_arg = [1 : i32]
+    // CHECK-SAME: dim = [1 : i32]
     // CHECK-SAME: keep_dim = true
     // CHECK-SAME: tensor<128x10xf32,
     // CHECK-SAME: -> tensor<128x1xf32,
     // CHECK-NOT: "ttnn.reshape"
-    %1 = "ttir.mean"(%arg0, %0) <{dim_arg = [1 : i32], keep_dim = true}> : (tensor<128x10xf32>, tensor<128x1xf32>) -> tensor<128x1xf32>
+    %1 = "ttir.mean"(%arg0, %0) <{dim = [1 : i32], keep_dim = true}> : (tensor<128x10xf32>, tensor<128x1xf32>) -> tensor<128x1xf32>
     return %1 : tensor<128x1xf32>
   }
diff --git a/test/ttmlir/Silicon/TTNN/simple_reductions.mlir b/test/ttmlir/Silicon/TTNN/simple_reductions.mlir
index 28eaf47fa..502af6b0b 100644
--- a/test/ttmlir/Silicon/TTNN/simple_reductions.mlir
+++ b/test/ttmlir/Silicon/TTNN/simple_reductions.mlir
@@ -4,47 +4,47 @@
 func.func @sum(%arg0: tensor<1x1x512x64xbf16>) -> tensor<1x1x512xbf16> {
   %0 = tensor.empty() : tensor<1x1x512xbf16>
   // CHECK: %[[C:.*]] = "ttnn.sum"[[C:.*]]
-  %1 = "ttir.sum"(%arg0, %0) <{dim_arg = [-1: i32], keep_dim = true}> : (tensor<1x1x512x64xbf16>, tensor<1x1x512xbf16>) -> tensor<1x1x512xbf16>
+  %1 = "ttir.sum"(%arg0, %0) <{dim = [-1: i32], keep_dim = true}> : (tensor<1x1x512x64xbf16>, tensor<1x1x512xbf16>) -> tensor<1x1x512xbf16>
   return %1 : tensor<1x1x512xbf16>
 }
 
 func.func @sum_last_2_dims(%arg0: tensor<1x32x512x64xbf16>) -> tensor<1x32xbf16> {
   %0 = tensor.empty() : tensor<1x32xbf16>
   // CHECK: %[[C:.*]] = "ttnn.sum"[[C:.*]]
-  %1 = "ttir.sum"(%arg0, %0) <{dim_arg = [-1: i32, -2: i32], keep_dim = true}> : (tensor<1x32x512x64xbf16>, tensor<1x32xbf16>) -> tensor<1x32xbf16>
+  %1 = "ttir.sum"(%arg0, %0) <{dim = [-1: i32, -2: i32], keep_dim = true}> : (tensor<1x32x512x64xbf16>, tensor<1x32xbf16>) -> tensor<1x32xbf16>
   return %1 : tensor<1x32xbf16>
 }
 
 func.func @sum_first_dim(%arg0: tensor<64x10xf32>) -> tensor<1x10xf32> {
   %0 = tensor.empty() : tensor<1x10xf32>
-  %1 = "ttir.sum"(%arg0, %0) <{dim_arg = [-2 : i32], keep_dim = true}> : (tensor<64x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32>
+  %1 = "ttir.sum"(%arg0, %0) <{dim = [-2 : i32], keep_dim = true}> : (tensor<64x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32>
   return %1: tensor<1x10xf32>
 }
 
 func.func @mean(%arg0: tensor<1x1x512x64xbf16>) -> tensor<1x1x512xbf16> {
   %0 = tensor.empty() : tensor<1x1x512xbf16>
   // CHECK: %[[C:.*]] = "ttnn.mean"[[C:.*]]
-  %1 = "ttir.mean"(%arg0, %0) <{dim_arg = [-1: i32], keep_dim = true}> : (tensor<1x1x512x64xbf16>, tensor<1x1x512xbf16>) -> tensor<1x1x512xbf16>
+  %1 = "ttir.mean"(%arg0, %0) <{dim = [-1: i32], keep_dim = true}> : (tensor<1x1x512x64xbf16>, tensor<1x1x512xbf16>) -> tensor<1x1x512xbf16>
   return %1 : tensor<1x1x512xbf16>
 }
 
 func.func @mean_last_2_dims(%arg0: tensor<1x32x512x64xbf16>) -> tensor<1x32xbf16> {
   %0 = tensor.empty() : tensor<1x32xbf16>
   // CHECK: %[[C:.*]] = "ttnn.mean"[[C:.*]]
-  %1 = "ttir.mean"(%arg0, %0) <{dim_arg = [-1: i32, -2: i32], keep_dim = true}> : (tensor<1x32x512x64xbf16>, tensor<1x32xbf16>) -> tensor<1x32xbf16>
+  %1 = "ttir.mean"(%arg0, %0) <{dim = [-1: i32, -2: i32], keep_dim = true}> : (tensor<1x32x512x64xbf16>, tensor<1x32xbf16>) -> tensor<1x32xbf16>
   return %1 : tensor<1x32xbf16>
 }
 
 func.func @max(%arg0: tensor<1x1x512x64xbf16>) -> tensor<1x1x512xbf16> {
   %0 = tensor.empty() : tensor<1x1x512xbf16>
   // CHECK: %[[C:.*]] = "ttnn.max"[[C:.*]]
-  %1 = "ttir.max"(%arg0, %0) <{dim_arg = [-1: i32], keep_dim = true}> : (tensor<1x1x512x64xbf16>, tensor<1x1x512xbf16>) -> tensor<1x1x512xbf16>
+  %1 = "ttir.max"(%arg0, %0) <{dim = [-1: i32], keep_dim = true}> : (tensor<1x1x512x64xbf16>, tensor<1x1x512xbf16>) -> tensor<1x1x512xbf16>
   return %1 : tensor<1x1x512xbf16>
 }
 
 func.func @max_last_2_dims(%arg0: tensor<1x32x512x64xbf16>) -> tensor<1x32xbf16> {
   %0 = tensor.empty() : tensor<1x32xbf16>
   // CHECK: %[[C:.*]] = "ttnn.max"[[C:.*]]
-  %1 = "ttir.max"(%arg0, %0) <{dim_arg = [-1: i32, -2: i32], keep_dim = true}> : (tensor<1x32x512x64xbf16>, tensor<1x32xbf16>) -> tensor<1x32xbf16>
+  %1 = "ttir.max"(%arg0, %0) <{dim = [-1: i32, -2: i32], keep_dim = true}> : (tensor<1x32x512x64xbf16>, tensor<1x32xbf16>) -> tensor<1x32xbf16>
   return %1 : tensor<1x32xbf16>
 }
diff --git a/test/ttmlir/Silicon/TTNN/simple_sum.mlir b/test/ttmlir/Silicon/TTNN/simple_sum.mlir
index cb1904a34..143e67a41 100644
--- a/test/ttmlir/Silicon/TTNN/simple_sum.mlir
+++ b/test/ttmlir/Silicon/TTNN/simple_sum.mlir
@@ -13,7 +13,7 @@ module {
   func.func public @reduce_not_keep_dim(%arg0: tensor<128x10xf32>) -> tensor<128xf32> {
     %0 = tensor.empty() : tensor<128xf32>
     // CHECK: "ttnn.sum"
-    // CHECK-SAME: dim_arg = [1 : i32]
+    // CHECK-SAME: dim = [1 : i32]
     // CHECK-SAME: keep_dim = true
     // CHECK-SAME: tensor<128x10xf32,
     // CHECK-SAME: -> tensor<128x1xf32,
@@ -21,19 +21,19 @@ module {
     // CHECK-SAME: shape = [128 : i32]
     // CHECK-SAME: tensor<128x1xf32,
     // CHECK-SAME: -> tensor<128xf32,
-    %1 = "ttir.sum"(%arg0, %0) <{dim_arg = [1 : i32], keep_dim = false}> : (tensor<128x10xf32>, tensor<128xf32>) -> tensor<128xf32>
+    %1 = "ttir.sum"(%arg0, %0) <{dim = [1 : i32], keep_dim = false}> : (tensor<128x10xf32>, tensor<128xf32>) -> tensor<128xf32>
     return %1 : tensor<128xf32>
   }
 
   func.func public @reduce_keep_dim(%arg0: tensor<128x10xf32>) -> tensor<128x1xf32> {
     %0 = tensor.empty() : tensor<128x1xf32>
     // CHECK: "ttnn.sum"
-    // CHECK-SAME: dim_arg = [1 : i32]
+    // CHECK-SAME: dim = [1 : i32]
     // CHECK-SAME: keep_dim = true
     // CHECK-SAME: tensor<128x10xf32,
     // CHECK-SAME: -> tensor<128x1xf32,
     // CHECK-NOT: "ttnn.reshape"
-    %1 = "ttir.sum"(%arg0, %0) <{dim_arg = [1 : i32], keep_dim = true}> : (tensor<128x10xf32>, tensor<128x1xf32>) -> tensor<128x1xf32>
+    %1 = "ttir.sum"(%arg0, %0) <{dim = [1 : i32], keep_dim = true}> : (tensor<128x10xf32>, tensor<128x1xf32>) -> tensor<128x1xf32>
     return %1 : tensor<128x1xf32>
   }
 }