From bbda3a8805eb7737ccef532e37a9a39191908c08 Mon Sep 17 00:00:00 2001 From: Abhinav Gunjal Date: Mon, 13 May 2024 10:05:21 -0700 Subject: [PATCH] Integrate LLVM at llvm/llvm-project@3a8316216807 (#2318) This PR bumps LLVM version to the latest LLVM integration commit. (backported from g3) 1. Tosa dialect op change from div to int_div 2. CHECK test expected log change (manually updated CHECK test expected logs) 3. stablehlo_legalize_to_vhlo.0_20_0.mlir test file, it is newly introduced (to fix asan cmake build failure) 4. disabled allow_user_poisoning. Created tracker https://github.com/openxla/stablehlo/issues/2326 to re-enable it. Thanks @mlevesquedion for the workaround. --- .github/workflows/buildAndTestCMake.yml | 2 + WORKSPACE.bazel | 4 +- build_tools/llvm_version.txt | 2 +- .../conversions/linalg/tests/convolution.mlir | 2 +- .../linalg/tests/miscellaneous.mlir | 8 +- .../conversions/linalg/tests/random.mlir | 12 +- stablehlo/conversions/tosa/tests/binary.mlir | 10 +- .../transforms/StablehloLegalizeToTosa.pdll | 2 +- .../stablehlo_legalize_to_vhlo.0_10_0.mlir | 520 +++++++++++------ .../stablehlo_legalize_to_vhlo.0_11_0.mlir | 523 +++++++++++------ .../stablehlo_legalize_to_vhlo.0_12_0.mlir | 523 +++++++++++------ .../stablehlo_legalize_to_vhlo.0_13_0.mlir | 523 +++++++++++------ .../stablehlo_legalize_to_vhlo.0_14_0.mlir | 523 +++++++++++------ .../stablehlo_legalize_to_vhlo.0_15_0.mlir | 523 +++++++++++------ .../stablehlo_legalize_to_vhlo.0_16_0.mlir | 526 +++++++++++------ .../stablehlo_legalize_to_vhlo.0_17_0.mlir | 541 +++++++++++------ .../stablehlo_legalize_to_vhlo.0_18_0.mlir | 542 +++++++++++------ .../stablehlo_legalize_to_vhlo.0_19_0.mlir | 550 ++++++++++++------ .../stablehlo_legalize_to_vhlo.0_20_0.mlir | 519 +++++++++++------ .../stablehlo_legalize_to_vhlo.0_9_0.mlir | 514 ++++++++++------ .../vhlo/stablehlo_legalize_to_vhlo.mlir | 550 ++++++++++++------ 21 files changed, 4666 insertions(+), 2253 deletions(-) diff --git 
a/.github/workflows/buildAndTestCMake.yml b/.github/workflows/buildAndTestCMake.yml index c84701118e5..c88ba21359a 100644 --- a/.github/workflows/buildAndTestCMake.yml +++ b/.github/workflows/buildAndTestCMake.yml @@ -82,6 +82,8 @@ jobs: CMAKE_BUILD_TYPE: Release STABLEHLO_ENABLE_BINDINGS_PYTHON: OFF STABLEHLO_ENABLE_SANITIZER: address + # TODO: remove this once https://github.com/openxla/stablehlo/pull/2318 is fixed + ASAN_OPTIONS: allow_user_poisoning=false - name: Build and Test StableHLO (with Python bindings) shell: bash diff --git a/WORKSPACE.bazel b/WORKSPACE.bazel index e5c87eb2668..cb0ac5fc7bb 100644 --- a/WORKSPACE.bazel +++ b/WORKSPACE.bazel @@ -17,9 +17,9 @@ workspace(name = "stablehlo") load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -LLVM_COMMIT = "2914a11e3fad5d5634272f028b2765ac182d6b20" +LLVM_COMMIT = "fc57f88f007497a4ead0ec8607ac66e1847b02d6" -LLVM_SHA256 = "8826cb0f4afae546aae1eb266854f9a546d6dd2bbb700c5c5ac588294c02ae8d" +LLVM_SHA256 = "0b66773795454d466ef4dcfae7cf38c8200ac4ee431e069ddf68313b3486b004" http_archive( name = "llvm-raw", diff --git a/build_tools/llvm_version.txt b/build_tools/llvm_version.txt index 4d5fc086160..e126c0dddd1 100644 --- a/build_tools/llvm_version.txt +++ b/build_tools/llvm_version.txt @@ -1 +1 @@ -2914a11e3fad5d5634272f028b2765ac182d6b20 +fc57f88f007497a4ead0ec8607ac66e1847b02d6 diff --git a/stablehlo/conversions/linalg/tests/convolution.mlir b/stablehlo/conversions/linalg/tests/convolution.mlir index ea47e961ce6..5c6be5f2668 100644 --- a/stablehlo/conversions/linalg/tests/convolution.mlir +++ b/stablehlo/conversions/linalg/tests/convolution.mlir @@ -356,7 +356,7 @@ func.func @depthwise_conv(%arg0: tensor<2x4x5x2xf32>, } // CHECK-DAG: %[[CST:.+]] = arith.constant 0.000000e+00 : f32 // CHECK: %[[COLLAPSE:.+]] = tensor.collapse_shape %[[FILTER]] {{\[}}[0, 1, 2, 3]] : tensor<2x2x1x6xf32> into tensor<24xf32> -// CHECK: %[[EXPAND:.+]] = tensor.expand_shape %[[COLLAPSE]] {{\[}}[0, 1, 2, 3]] : 
tensor<24xf32> into tensor<2x2x2x3xf32> +// CHECK: %[[EXPAND:.+]] = tensor.expand_shape %[[COLLAPSE]] {{\[}}[0, 1, 2, 3]] output_shape [2, 2, 2, 3] : tensor<24xf32> into tensor<2x2x2x3xf32> // CHECK: %[[INIT:.+]] = tensor.empty() : tensor<2x3x4x2x3xf32> // CHECK: %[[FILL:.+]] = linalg.fill ins(%[[CST]] : f32) outs(%[[INIT]] : tensor<2x3x4x2x3xf32>) -> tensor<2x3x4x2x3xf32> // CHECK: %[[OUT:.+]] = linalg.depthwise_conv_2d_nhwc_hwcm diff --git a/stablehlo/conversions/linalg/tests/miscellaneous.mlir b/stablehlo/conversions/linalg/tests/miscellaneous.mlir index 57c8d0dbd5d..c231d385d02 100644 --- a/stablehlo/conversions/linalg/tests/miscellaneous.mlir +++ b/stablehlo/conversions/linalg/tests/miscellaneous.mlir @@ -884,7 +884,7 @@ func.func @reshape_0D_1D(%arg0: tensor) -> tensor<1xi32> { %0 = "stablehlo.reshape"(%arg0) : (tensor) -> tensor<1xi32> func.return %0 : tensor<1xi32> } -// CHECK: tensor.expand_shape %{{.*}} [] : tensor into tensor<1xi32> +// CHECK: tensor.expand_shape %{{.*}} [] output_shape [1] : tensor into tensor<1xi32> // ----- @@ -895,7 +895,7 @@ func.func @reshape_0D_1D_unsigned(%arg0: tensor) -> tensor<1xui32> { func.return %0 : tensor<1xui32> } // CHECK: %[[ARG_SIGNLESS:.*]] = builtin.unrealized_conversion_cast %[[ARG_UNSIGNED]] : tensor to tensor -// CHECK: %[[RET_SIGNLESS:.*]] = tensor.expand_shape %[[ARG_SIGNLESS]] [] : tensor into tensor<1xi32> +// CHECK: %[[RET_SIGNLESS:.*]] = tensor.expand_shape %[[ARG_SIGNLESS]] [] output_shape [1] : tensor into tensor<1xi32> // CHECK: %[[RET_UNSIGNED:.*]] = builtin.unrealized_conversion_cast %[[RET_SIGNLESS]] : tensor<1xi32> to tensor<1xui32> // CHECK: return %[[RET_UNSIGNED]] : tensor<1xui32> @@ -997,7 +997,7 @@ func.func @reshape_dynamic_in(%arg0: tensor) -> tensor<2x4x5xf32> { } // CHECK: %[[FLATTEN:.*]] = tensor.collapse_shape %{{.*}} {{\[}}[0, 1]] : tensor into tensor // CHECK: %[[CAST:.*]] = tensor.cast %[[FLATTEN]] : tensor to tensor<40xf32> -// CHECK: tensor.expand_shape %[[CAST]] {{\[}}[0, 1, 2]] : 
tensor<40xf32> into tensor<2x4x5xf32> +// CHECK: tensor.expand_shape %[[CAST]] {{\[}}[0, 1, 2]] output_shape [2, 4, 5] : tensor<40xf32> into tensor<2x4x5xf32> // ----- @@ -1007,7 +1007,7 @@ func.func @reshape_1D_2D_dynamic(%arg0: tensor) -> tensor<1x3xi32> { func.return %0 : tensor<1x3xi32> } // CHECK: %[[CAST:.*]] = tensor.cast %{{.*}} : tensor to tensor<3xi32> -// CHECK: tensor.expand_shape %[[CAST]] {{\[}}[0, 1]] : tensor<3xi32> into tensor<1x3xi32> +// CHECK: tensor.expand_shape %[[CAST]] {{\[}}[0, 1]] output_shape [1, 3] : tensor<3xi32> into tensor<1x3xi32> // ----- diff --git a/stablehlo/conversions/linalg/tests/random.mlir b/stablehlo/conversions/linalg/tests/random.mlir index 8b1ac32ad23..334042ad4fd 100644 --- a/stablehlo/conversions/linalg/tests/random.mlir +++ b/stablehlo/conversions/linalg/tests/random.mlir @@ -480,8 +480,8 @@ func.func @philox_i64(%arg0: tensor<2xi64>) -> (tensor<2xi64>, tensor<8xi64>) { // CHECK-DAG: %[[VAL_101:.*]] = arith.xori %[[VAL_100]], %[[VAL_87]] : i32 // CHECK: linalg.yield %[[YIELDED_1:.*]], %[[YIELDED_2:.*]] : i64, i64 -// CHECK-DAG: %[[VAL_206:.*]] = tensor.expand_shape %[[VAL_207:.*]]#0 {{\[\[}}0, 1]] : tensor<4xi64> into tensor<4x1xi64> -// CHECK-DAG: %[[VAL_208:.*]] = tensor.expand_shape %[[VAL_207]]#1 {{\[\[}}0, 1]] : tensor<4xi64> into tensor<4x1xi64> +// CHECK-DAG: %[[VAL_206:.*]] = tensor.expand_shape %[[VAL_207:.*]]#0 {{\[\[}}0, 1]] +// CHECK-DAG: %[[VAL_208:.*]] = tensor.expand_shape %[[VAL_207]]#1 {{\[\[}}0, 1]] // CHECK-DAG: %[[VAL_209:.*]] = tensor.empty() : tensor<4x2xi64> // CHECK-DAG: %[[VAL_213:.*]] = tensor.insert %[[VAL_30]] into %[[VAL_0]]{{\[}}%[[VAL_19]]] : tensor<2xi64> @@ -575,10 +575,10 @@ func.func @philox_i32_odd(%arg0: tensor<2xi64>) -> (tensor<2xi64>, tensor<7x11xi // CHECK: %[[COLLAPSE:.+]] = tensor.collapse_shape %[[CONCAT]] -// CHECK: %[[VAL_213:.*]] = tensor.expand_shape %[[COLLAPSE]] {{\[\[}}0, 1]] : tensor<80xi32> into tensor<80x1xi32> +// CHECK: %[[VAL_213:.*]] = tensor.expand_shape 
%[[COLLAPSE]] {{\[\[}}0, 1]] // CHECK: %[[VAL_214:.*]] = tensor.extract_slice %[[VAL_213]][0, 0] [77, 1] [1, 1] : tensor<80x1xi32> to tensor<77x1xi32> // CHECK: %[[VAL_215:.*]] = tensor.collapse_shape %[[VAL_214]] {{\[\[}}0, 1]] : tensor<77x1xi32> into tensor<77xi32> -// CHECK: %[[VAL_216:.*]] = tensor.expand_shape %[[VAL_215]] {{\[\[}}0, 1]] : tensor<77xi32> into tensor<7x11xi32> +// CHECK: %[[VAL_216:.*]] = tensor.expand_shape %[[VAL_215]] {{\[\[}}0, 1]] // CHECK: %[[VAL_217:.*]] = tensor.insert %[[NEWSTATE]] into %[[ARG0]]{{\[}}%[[C1]]] : tensor<2xi64> // CHECK: return %[[VAL_217]], %[[VAL_216]] : tensor<2xi64>, tensor<7x11xi32> @@ -616,10 +616,10 @@ func.func @philox_i64_odd(%arg0: tensor<2xi64>) -> (tensor<2xi64>, tensor<3x5xi6 // CHECK-DAG: %[[COLLAPSE:.+]] = tensor.collapse_shape %[[CONCAT]] {{\[\[}}0, 1]] : tensor<8x2xi64> into tensor<16xi64> -// CHECK-DAG: %[[EXPANDED:.*]] = tensor.expand_shape %[[COLLAPSE]] {{\[\[}}0, 1]] : tensor<16xi64> into tensor<16x1xi64> +// CHECK-DAG: %[[EXPANDED:.*]] = tensor.expand_shape %[[COLLAPSE]] {{\[\[}}0, 1]] // CHECK-DAG: %[[SLICE:.*]] = tensor.extract_slice %[[EXPANDED]][0, 0] [15, 1] [1, 1] : tensor<16x1xi64> to tensor<15x1xi64> // CHECK-DAG: %[[EXPAND_2:.*]] = tensor.collapse_shape %[[SLICE]] {{\[\[}}0, 1]] : tensor<15x1xi64> into tensor<15xi64> -// CHECK-DAG: %[[RESHAPE:.*]] = tensor.expand_shape %[[EXPAND_2]] {{\[\[}}0, 1]] : tensor<15xi64> into tensor<3x5xi64> +// CHECK-DAG: %[[RESHAPE:.*]] = tensor.expand_shape %[[EXPAND_2]] {{\[\[}}0, 1]] // CHECK-DAG: %[[INSERTED:.+]] = tensor.insert %[[NEWSTATE]] into %[[ARG0]][%[[C1]]] : tensor<2xi64> // CHECK: return %[[INSERTED]], %[[RESHAPE]] diff --git a/stablehlo/conversions/tosa/tests/binary.mlir b/stablehlo/conversions/tosa/tests/binary.mlir index 012dd77d3f3..4de314124d2 100644 --- a/stablehlo/conversions/tosa/tests/binary.mlir +++ b/stablehlo/conversions/tosa/tests/binary.mlir @@ -45,19 +45,11 @@ func.func @concatenate(%arg0 : tensor<3x3xf32>, %arg1 : tensor<3x3xf32>) 
-> tens // CHECK-LABEL: @divide func.func @divide(%arg0 : tensor<10xi32>, %arg1 : tensor<10xi32>) -> tensor<10xi32> { - // CHECK: tosa.div + // CHECK: tosa.int_div %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor<10xi32>, tensor<10xi32>) -> tensor<10xi32> return %0 : tensor<10xi32> } -// CHECK-LABEL: @divide_f32 -func.func @divide_f32(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> tensor<10xf32> { - // tosa.div only supports i32, so this should not legalize. - // CHECK: stablehlo.divide - %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xf32> - return %0 : tensor<10xf32> -} - // CHECK-LABEL: @dot_vector_vector func.func @dot_vector_vector(%arg0 : tensor<3xf32>, %arg1 : tensor<3xf32>) -> tensor { // CHECK-DAG: %[[VAR0:.*]] = tosa.reshape %arg0 {new_shape = array} diff --git a/stablehlo/conversions/tosa/transforms/StablehloLegalizeToTosa.pdll b/stablehlo/conversions/tosa/transforms/StablehloLegalizeToTosa.pdll index 13f6244e401..22ee6121a91 100644 --- a/stablehlo/conversions/tosa/transforms/StablehloLegalizeToTosa.pdll +++ b/stablehlo/conversions/tosa/transforms/StablehloLegalizeToTosa.pdll @@ -125,7 +125,7 @@ Pattern => Pattern => replace op(input0 : Value<_: Tosa_Int32Tensor>, input1 : Value<_: Tosa_Int32Tensor>) - with op(input0, input1); + with op(input0, input1); Pattern => replace op(input0 : Value<_: Tosa_Tensor>, input1 : Value<_: Tosa_Tensor>) diff --git a/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_10_0.mlir b/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_10_0.mlir index f562fc075ac..b5b094a8091 100644 --- a/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_10_0.mlir +++ b/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_10_0.mlir @@ -13,6 +13,7 @@ // ============ ATTRIBUTES ============ // CHECK-LABEL: "attr_comparison_direction_eq" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, 
%arg1) { // CHECK: comparison_direction = #vhlo @@ -22,6 +23,7 @@ func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_ne" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -31,6 +33,7 @@ func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_ge" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -40,6 +43,7 @@ func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_gt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -49,6 +53,7 @@ func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_le" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -58,6 +63,7 @@ func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_lt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -67,6 +73,7 @@ func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_type_notype" +// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo @@ -76,6 +83,7 @@ func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) - } // CHECK-LABEL: "attr_comparison_type_float" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -86,6 +94,7 @@ func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> } // CHECK-LABEL: "attr_comparison_type_totalorder" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -96,6 +105,7 @@ func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -106,6 +116,7 @@ func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) - } // CHECK-LABEL: "attr_comparison_type_unsigned" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -118,6 +129,7 @@ func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) // ConvDimensionNumbers aka #stablehlo.conv is covered below. 
// CHECK-LABEL: "attr_custom_call_api_version_unspecified" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -128,6 +140,7 @@ func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tenso } // CHECK-LABEL: "attr_custom_call_api_version_original" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -138,6 +151,7 @@ func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -148,6 +162,7 @@ func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> } // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -160,6 +175,7 @@ func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor>) -> tensor<16xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -170,6 +186,7 @@ func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomple } // CHECK-LABEL: "attr_fft_type_ifft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -180,6 +197,7 @@ func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcompl } // CHECK-LABEL: "attr_fft_type_rfft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -190,6 +208,7 @@ func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> 
tensor<9xcomplex> { } // CHECK-LABEL: "attr_fft_type_irfft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -202,6 +221,7 @@ func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> // GatherDimensionNumbers aka #stablehlo.gather is covered below. // CHECK-LABEL: "attr_precision_config_default" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -210,6 +230,7 @@ func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor< } // CHECK-LABEL: "attr_precision_config_high" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -219,6 +240,7 @@ func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x } // CHECK-LABEL: "attr_precision_config_highest" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -228,6 +250,7 @@ func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor< } // CHECK-LABEL: "attr_rng_algorithm_default" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -237,6 +260,7 @@ func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tenso } // CHECK-LABEL: 
"attr_rng_algorithm_three_fry" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -246,6 +270,7 @@ func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, ten } // CHECK-LABEL: "attr_rng_algorithm_philox" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -255,6 +280,7 @@ func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor } // CHECK-LABEL: "attr_rng_distribution_uniform" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { // CHECK: rng_distribution = #vhlo @@ -264,6 +290,7 @@ func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, } // CHECK-LABEL: "attr_rng_distribution_normal" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { // CHECK: rng_distribution = #vhlo @@ -275,6 +302,7 @@ func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. 
// CHECK-LABEL: "attr_transpose_no_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -287,6 +315,7 @@ func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<1 } // CHECK-LABEL: "attr_transpose_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -299,6 +328,7 @@ func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x1 } // CHECK-LABEL: "attr_transpose_adjoint" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -313,10 +343,9 @@ func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16x // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. 
// CHECK-LABEL: "attr_type_extensions_bounds" -func.func @attr_type_extensions_bounds( - %arg0: tensor>) - -> tensor> { - // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) +func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { + // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () func.return %arg0 : tensor> } @@ -324,8 +353,9 @@ func.func @attr_type_extensions_bounds( // ============ DEFAULTS ============ // CHECK-LABEL: "default_all_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.all_gather_v1"(%arg0) <{ + // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -339,8 +369,9 @@ func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "default_all_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_reduce(%arg0: tensor) -> tensor { - // CHECK: "vhlo.all_reduce_v1"(%arg0) + // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) // CHECK-SAME: <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -362,8 +393,9 @@ func.func @default_all_reduce(%arg0: tensor) -> tensor { } // CHECK-LABEL: "default_all_to_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { - // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ + // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, @@ -380,8 +412,9 @@ func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { 
} // CHECK-LABEL: "default_cholesky" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { - // CHECK: "vhlo.cholesky_v1"(%arg0) <{ + // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ // CHECK-SAME: lower = #vhlo.bool_v1 // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> @@ -389,8 +422,9 @@ func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { } // CHECK-LABEL: "default_collective_permute" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { - // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ + // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> @@ -401,8 +435,9 @@ func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf3 } // CHECK-LABEL: "default_compare" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: compare_type = #vhlo, // CHECK-SAME: comparison_direction = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -413,8 +448,9 @@ func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor } // CHECK-LABEL: "default_convolution" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { - // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.convolution_v1"(%[[ARG0]], 
%[[ARG1]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -442,8 +478,9 @@ func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x2 } // CHECK-LABEL: "default_custom_call" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_custom_call(%arg0: tensor) -> tensor { - // CHECK: "vhlo.custom_call_v1"(%arg0) <{ + // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ // CHECK-SAME: api_version = #vhlo, // CHECK-SAME: backend_config = #vhlo.string_v1<"">, // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, @@ -460,8 +497,9 @@ func.func @default_custom_call(%arg0: tensor) -> tensor { } // CHECK-LABEL: "default_dot_general" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { - // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, @@ -480,8 +518,9 @@ func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf } // CHECK-LABEL: "default_dot" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> @@ -489,8 +528,9 @@ func.func 
@default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tens } // CHECK-LABEL: "default_dynamic_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>> @@ -502,8 +542,9 @@ func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tenso } // CHECK-LABEL: "default_dynamic_conv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<2x2xi32>) -> tensor<1x?x?x16xf32> { - // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -531,8 +572,9 @@ func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x } // CHECK-LABEL: "default_dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { - // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = 
#vhlo.bool_v1, @@ -558,15 +600,16 @@ func.func @default_func(%arg0: tensor) -> tensor { // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : () -> () func.return %arg0 : tensor } // CHECK-LABEL: "dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { - // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -587,8 +630,9 @@ func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) } // CHECK-LABEL: "default_infeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.infeed_v1"(%arg0) <{ + // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ // CHECK-SAME: infeed_config = #vhlo.string_v1<"">, // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) @@ -597,8 +641,9 @@ func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.t } // CHECK-LABEL: "default_outfeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> // CHECK-SAME: }> : 
(!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token @@ -606,8 +651,9 @@ func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stab } // CHECK-LABEL: "default_recv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.recv_v1"(%arg0) <{ + // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -619,8 +665,9 @@ func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.tok } // CHECK-LABEL: "default_send" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -632,8 +679,9 @@ func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stableh } // CHECK-LABEL: "default_reduce_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> @@ -655,8 +703,9 @@ func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "default_reduce_window" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> 
tensor<2x16x30x7xf32> { - // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, @@ -678,8 +727,9 @@ func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { - // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, @@ -707,8 +757,9 @@ func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi3 } // CHECK-LABEL: "default_select_and_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { - // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> @@ -736,8 +787,9 @@ func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: ten } // CHECK-LABEL: "default_sort" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.sort_v1"(%arg0) <{ + // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> // CHECK-SAME: is_stable = #vhlo.bool_v1 // CHECK-SAME: }> ({ @@ -756,29 +808,33 @@ 
func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { // ============ OPS ============ // CHECK-LABEL: "op_abs" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_abs(%arg0: tensor) -> tensor { - // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_add" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_after_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 + // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token func.return %0 : !stablehlo.token } // CHECK-LABEL: "op_all_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.all_gather_v1"(%arg0) <{ + // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -794,8 +850,9 @@ func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_all_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_reduce(%arg0: tensor) -> tensor { - // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ + // CHECK: 
"vhlo.all_reduce_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 @@ -817,8 +874,9 @@ func.func @op_all_reduce(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_all_to_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { - // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ + // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, @@ -836,22 +894,25 @@ func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { } // CHECK-LABEL: "op_and" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_atan2" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_batch_norm_grad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: 
tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { - // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ + // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) @@ -863,8 +924,9 @@ func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf } // CHECK-LABEL: "op_batch_norm_inference" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { - // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ + // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> @@ -876,8 +938,9 @@ func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor } // CHECK-LABEL: "op_batch_norm_training" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_batch_norm_training(%arg0: 
tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { - // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) @@ -889,15 +952,17 @@ func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor< } // CHECK-LABEL: "op_bitcast_convert" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_bitcast_convert(%arg0: tensor) -> tensor { - // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ + // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> %0 = "stablehlo.broadcast_in_dim"(%arg0) { @@ -907,8 +972,9 @@ func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_broadcast" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.broadcast_v1"(%arg0) <{ + // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : 
tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> %0 = "stablehlo.broadcast"(%arg0) { @@ -918,9 +984,10 @@ func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_case" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.case_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.case"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> () @@ -929,22 +996,25 @@ func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_cbrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cbrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_ceil" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_ceil(%arg0: tensor) -> tensor { - // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_cholesky" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { - // CHECK: "vhlo.cholesky_v1"(%arg0) <{ + // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ // CHECK-SAME: lower = #vhlo.bool_v1 // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> %0 = "stablehlo.cholesky"(%arg0) { @@ -954,22 +1024,25 @@ func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { } 
// CHECK-LABEL: "op_clamp" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_count_leading_zeros" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { - // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_collective_permute" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { - // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ + // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> @@ -981,8 +1054,9 @@ func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { } // CHECK-LABEL: "op_compare" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: compare_type = #vhlo, // CHECK-SAME: comparison_direction = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -994,15 +1068,17 @@ 
func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_complex" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { - // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> + // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "op_concatenate" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.concatenate"(%arg0, %arg1) { @@ -1012,6 +1088,7 @@ func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor< } // CHECK-LABEL: "op_constant" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_constant(%arg0: tensor) -> tensor { // CHECK: "vhlo.constant_v1"() <{ // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> @@ -1023,15 +1100,17 @@ func.func @op_constant(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_convert" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_convert(%arg0: tensor) -> tensor { - // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_convolution" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { - // CHECK: 
"vhlo.convolution_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -1065,8 +1144,9 @@ func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16 } // CHECK-LABEL: "op_cosine" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cosine(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor func.return %0 : tensor } @@ -1079,8 +1159,9 @@ func.func @op_create_token() -> !stablehlo.token { } // CHECK-LABEL: "op_cross_replica_sum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ + // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cross-replica-sum"(%arg0) { @@ -1090,8 +1171,9 @@ func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_custom_call" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_custom_call(%arg0: tensor) -> tensor { - // CHECK: "vhlo.custom_call_v1"(%arg0) <{ + // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ // CHECK-SAME: api_version = #vhlo, // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, @@ -1122,15 +1204,17 @@ func.func @op_custom_call(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_divide" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, 
!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dot_general" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { - // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, @@ -1150,8 +1234,9 @@ func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) } // CHECK-LABEL: "op_dot" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.dot"(%arg0, %arg1) { @@ -1161,8 +1246,9 @@ func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x } // CHECK-LABEL: "op_dynamic_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: known_nonexpanding_dimensions 
= #vhlo.tensor_v1 : tensor<1xi64>> @@ -1176,8 +1262,9 @@ func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xi } // CHECK-LABEL: "op_dynamic_conv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<2x2xi32>) -> tensor<1x?x?x16xf32> { - // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -1210,8 +1297,9 @@ func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x1 } // CHECK-LABEL: "op_dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { - // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -1231,8 +1319,9 @@ func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32 } // CHECK-LABEL: "op_dynamic_iota" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ + // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_iota"(%arg0) { @@ -1242,22 +1331,25 @@ func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { } // CHECK-LABEL: 
"op_dynamic_pad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dynamic_reshape" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dynamic_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { - // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> 
%0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { @@ -1267,15 +1359,17 @@ func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor } // CHECK-LABEL: "op_dynamic_update_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> + // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> func.return %0 : tensor<16xf32> } // CHECK-LABEL: "op_einsum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.einsum"(%arg0, %arg1) { @@ -1285,22 +1379,25 @@ func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor } // CHECK-LABEL: "op_exponential_minus_one" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { - // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: 
"op_exponential" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_exponential(%arg0: tensor) -> tensor { - // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_fft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - // CHECK: "vhlo.fft_v1"(%arg0) <{ + // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: fft_type = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> @@ -1312,8 +1409,9 @@ func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { } // CHECK-LABEL: "op_floor" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_floor(%arg0: tensor) -> tensor { - // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor func.return %0 : tensor } @@ -1326,16 +1424,17 @@ func.func private @op_func(%arg0: tensor {stablehlo.arg = "0"}) -> (tensor< // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : () -> () func.return %arg0 : tensor } // CHECK-LABEL: "op_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { - // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ + // CHECK: 
"vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -1357,8 +1456,9 @@ func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> te } // CHECK-LABEL: "op_get_dimension_size" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_get_dimension_size(%arg0: tensor) -> tensor { - // CHECK: "vhlo.get_dimension_size_v1"(%arg0) <{ + // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.get_dimension_size"(%arg0) { @@ -1368,8 +1468,9 @@ func.func @op_get_dimension_size(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_get_tuple_element" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { - // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ + // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.get_tuple_element"(%arg0) { @@ -1379,11 +1480,12 @@ func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tenso } // CHECK-LABEL: "op_if" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.if_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }, { - // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.if"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> 
() @@ -1394,15 +1496,17 @@ func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> t } // CHECK-LABEL: "op_imag" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_imag(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_infeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.infeed_v1"(%arg0) <{ + // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) @@ -1425,36 +1529,41 @@ func.func @op_iota() -> tensor<16xf32> { } // CHECK-LABEL: "op_is_finite" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_is_finite(%arg0: tensor) -> tensor { - // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_log" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_log(%arg0: tensor) -> tensor { - // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_log_plus_one" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_log_plus_one(%arg0: tensor) -> tensor { - // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor func.return %0 : tensor 
} // CHECK-LABEL: "op_logistic" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_logistic(%arg0: tensor) -> tensor { - // CHECK: "vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_map" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.map_v1"(%arg0) <{ + // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> ({ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): @@ -1472,57 +1581,65 @@ func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_maximum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_minimum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_multiply" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: 
"vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_negate" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_negate(%arg0: tensor) -> tensor { - // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_not" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_not(%arg0: tensor) -> tensor { - // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_optimization_barrier" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_optimization_barrier(%arg0: tensor) -> tensor { - // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_or" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_outfeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ // 
CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.outfeed"(%arg0, %arg1) { @@ -1532,8 +1649,9 @@ func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo } // CHECK-LABEL: "op_pad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> @@ -1547,36 +1665,41 @@ func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { } // CHECK-LABEL: "op_popcnt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_popcnt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_power" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_real_dynamic_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, 
!vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_real" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_real(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_recv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.recv_v1"(%arg0) <{ + // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<3 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -1589,8 +1712,9 @@ func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { } // CHECK-LABEL: "op_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { - // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) + // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -1605,8 +1729,9 @@ func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_reduce_precision" +// CHECK-NEXT: (%[[ARG0:.*]]: 
{{.*}}) func.func @op_reduce_precision(%arg0: tensor) -> tensor { - // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -1618,8 +1743,9 @@ func.func @op_reduce_precision(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_reduce_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> @@ -1643,8 +1769,9 @@ func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_reduce_window" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { - // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, @@ -1670,8 +1797,9 @@ func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> } // CHECK-LABEL: "op_remainder" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) 
-> tensor func.return %0 : tensor } @@ -1691,16 +1819,18 @@ func.func @op_partition_id() -> tensor { } // CHECK-LABEL: "op_reshape" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { - // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> + // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> func.return %0 : tensor<4x4xf32> } // CHECK-LABEL: "op_return" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.case_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.case"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> () @@ -1709,8 +1839,9 @@ func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_reverse" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reverse_v1"(%arg0) <{ + // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.reverse"(%arg0) { @@ -1720,8 +1851,9 @@ func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_rng_bit_generator" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { - // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ + // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ // CHECK-SAME: rng_algorithm = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, 
!vhlo.tensor_v1) %0:2 = "stablehlo.rng_bit_generator"(%arg0) { @@ -1731,8 +1863,9 @@ func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor } // CHECK-LABEL: "op_rng" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: rng_distribution = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { @@ -1742,29 +1875,33 @@ func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex> } // CHECK-LABEL: "op_round_nearest_afz" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { - // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_round_nearest_even" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_round_nearest_even(%arg0: tensor) -> tensor { - // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_rsqrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_rsqrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, 
%[[ARG2:.*]]: {{.*}}) func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { - // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, @@ -1794,8 +1931,9 @@ func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, % } // CHECK-LABEL: "op_select_and_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { - // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> @@ -1825,15 +1963,17 @@ func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<1 } // CHECK-LABEL: "op_select" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_send" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - 
// CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -1846,8 +1986,9 @@ func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.to } // CHECK-LABEL: "op_set_dimension_size" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { @@ -1857,43 +1998,49 @@ func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> te } // CHECK-LABEL: "op_shift_left" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_shift_right_arithmetic" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_shift_right_logical" +// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_sign" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sign(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_sine" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sine(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { - // CHECK: "vhlo.slice_v1"(%arg0) <{ + // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> @@ -1907,8 +2054,9 @@ func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { } // CHECK-LABEL: "op_sort" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.sort_v1"(%arg0) <{ + // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: is_stable = #vhlo.bool_v1 // CHECK-SAME: }> ({ @@ -1928,29 +2076,33 @@ func.func @op_sort(%arg0: 
tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_sqrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sqrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_subtract" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_tanh" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_tanh(%arg0: tensor) -> tensor { - // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_torch_index_select" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { - // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> @@ -1962,8 +2114,9 @@ func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) } // CHECK-LABEL: "op_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { - // CHECK: 
"vhlo.transpose_v1"(%arg0) <{ + // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> %0 = "stablehlo.transpose"(%arg0) { @@ -1973,8 +2126,9 @@ func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { } // CHECK-LABEL: "op_triangular_solve" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: left_side = #vhlo.bool_v1, // CHECK-SAME: lower = #vhlo.bool_v1, // CHECK-SAME: transpose_a = #vhlo, @@ -1990,15 +2144,17 @@ func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32 } // CHECK-LABEL: "op_tuple" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_tuple(%arg0: tensor) -> tuple> { - // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> + // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> func.return %0 : tuple> } // CHECK-LABEL: "op_unary_einsum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { - // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ + // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> %0 = "stablehlo.unary_einsum"(%arg0) { @@ -2008,22 +2164,25 @@ func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { } // CHECK-LABEL: "op_uniform_dequantize" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> 
!vhlo.tensor_v1 + // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_uniform_quantize" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { - // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> + // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "op_while" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_while(%arg0: tensor) -> tensor { - // CHECK: "vhlo.while_v1"(%arg0) ({ + // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }, { @@ -2041,8 +2200,9 @@ func.func @op_while(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_xor" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } @@ -2050,183 +2210,209 @@ func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { // ============ TYPES ============ // CHECK-LABEL: "type_i1" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return 
%0 : tensor } // CHECK-LABEL: "type_i4" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i8" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> 
!vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui4" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui8" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: 
"type_ui64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3FN" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E5M2" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E5M2FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, 
!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_bf16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // 
CHECK-LABEL: "type_complex_f32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "type_complex_f64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "type_dynamism_ranked" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { - // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_quantization" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> // CHECK-LABEL: "type_token_callee" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: 
"vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () + // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () return %arg0 : !stablehlo.token } // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> // CHECK-LABEL: "type_token_caller" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} + // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token return %0 : !stablehlo.token } // CHECK-LABEL: "type_tuple" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_tuple(%arg0: tuple>) -> tuple { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo" diff --git a/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_11_0.mlir b/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_11_0.mlir index f023805ca04..6c812fa1c90 100644 --- a/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_11_0.mlir +++ b/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_11_0.mlir @@ -13,6 +13,7 @@ // ============ ATTRIBUTES ============ // CHECK-LABEL: "attr_comparison_direction_eq" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -22,6 +23,7 @@ func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_ne" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -31,6 +33,7 @@ func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_ge" 
+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -40,6 +43,7 @@ func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_gt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -49,6 +53,7 @@ func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_le" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -58,6 +63,7 @@ func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_lt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -67,6 +73,7 @@ func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_type_notype" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo @@ -76,6 +83,7 @@ func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) - } // CHECK-LABEL: "attr_comparison_type_float" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = 
#stablehlo, @@ -86,6 +94,7 @@ func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> } // CHECK-LABEL: "attr_comparison_type_totalorder" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -96,6 +105,7 @@ func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -106,6 +116,7 @@ func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) - } // CHECK-LABEL: "attr_comparison_type_unsigned" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -118,6 +129,7 @@ func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) // ConvDimensionNumbers aka #stablehlo.conv is covered below. 
// CHECK-LABEL: "attr_custom_call_api_version_unspecified" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -128,6 +140,7 @@ func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tenso } // CHECK-LABEL: "attr_custom_call_api_version_original" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -138,6 +151,7 @@ func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -148,6 +162,7 @@ func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> } // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -160,6 +175,7 @@ func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor>) -> tensor<16xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -170,6 +186,7 @@ func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomple } // CHECK-LABEL: "attr_fft_type_ifft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -180,6 +197,7 @@ func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcompl } // CHECK-LABEL: "attr_fft_type_rfft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -190,6 +208,7 @@ func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> 
tensor<9xcomplex> { } // CHECK-LABEL: "attr_fft_type_irfft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -202,6 +221,7 @@ func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> // GatherDimensionNumbers aka #stablehlo.gather is covered below. // CHECK-LABEL: "attr_precision_config_default" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -210,6 +230,7 @@ func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor< } // CHECK-LABEL: "attr_precision_config_high" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -219,6 +240,7 @@ func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x } // CHECK-LABEL: "attr_precision_config_highest" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -228,6 +250,7 @@ func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor< } // CHECK-LABEL: "attr_rng_algorithm_default" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -237,6 +260,7 @@ func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tenso } // CHECK-LABEL: 
"attr_rng_algorithm_three_fry" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -246,6 +270,7 @@ func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, ten } // CHECK-LABEL: "attr_rng_algorithm_philox" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -255,6 +280,7 @@ func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor } // CHECK-LABEL: "attr_rng_distribution_uniform" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { // CHECK: rng_distribution = #vhlo @@ -264,6 +290,7 @@ func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, } // CHECK-LABEL: "attr_rng_distribution_normal" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { // CHECK: rng_distribution = #vhlo @@ -275,6 +302,7 @@ func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. 
// CHECK-LABEL: "attr_transpose_no_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -287,6 +315,7 @@ func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<1 } // CHECK-LABEL: "attr_transpose_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -299,6 +328,7 @@ func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x1 } // CHECK-LABEL: "attr_transpose_adjoint" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -313,10 +343,9 @@ func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16x // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. 
// CHECK-LABEL: "attr_type_extensions_bounds" -func.func @attr_type_extensions_bounds( - %arg0: tensor>) - -> tensor> { - // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) +func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { + // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () func.return %arg0 : tensor> } @@ -324,8 +353,9 @@ func.func @attr_type_extensions_bounds( // ============ DEFAULTS ============ // CHECK-LABEL: "default_all_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.all_gather_v1"(%arg0) <{ + // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -339,8 +369,9 @@ func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "default_all_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_reduce(%arg0: tensor) -> tensor { - // CHECK: "vhlo.all_reduce_v1"(%arg0) + // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) // CHECK-SAME: <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -362,8 +393,9 @@ func.func @default_all_reduce(%arg0: tensor) -> tensor { } // CHECK-LABEL: "default_all_to_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { - // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ + // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, @@ -380,8 +412,9 @@ func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { 
} // CHECK-LABEL: "default_cholesky" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { - // CHECK: "vhlo.cholesky_v1"(%arg0) <{ + // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ // CHECK-SAME: lower = #vhlo.bool_v1 // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> @@ -389,8 +422,9 @@ func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { } // CHECK-LABEL: "default_collective_permute" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { - // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ + // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> @@ -401,8 +435,9 @@ func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf3 } // CHECK-LABEL: "default_compare" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: compare_type = #vhlo, // CHECK-SAME: comparison_direction = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -413,8 +448,9 @@ func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor } // CHECK-LABEL: "default_convolution" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { - // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.convolution_v1"(%[[ARG0]], 
%[[ARG1]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -442,8 +478,9 @@ func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x2 } // CHECK-LABEL: "default_custom_call" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_custom_call(%arg0: tensor) -> tensor { - // CHECK: "vhlo.custom_call_v1"(%arg0) <{ + // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ // CHECK-SAME: api_version = #vhlo, // CHECK-SAME: backend_config = #vhlo.string_v1<"">, // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, @@ -460,8 +497,9 @@ func.func @default_custom_call(%arg0: tensor) -> tensor { } // CHECK-LABEL: "default_dot_general" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { - // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, @@ -480,8 +518,9 @@ func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf } // CHECK-LABEL: "default_dot" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> @@ -489,8 +528,9 @@ func.func 
@default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tens } // CHECK-LABEL: "default_dynamic_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>> @@ -502,8 +542,9 @@ func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tenso } // CHECK-LABEL: "default_dynamic_conv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<2x2xi32>) -> tensor<1x?x?x16xf32> { - // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -531,8 +572,9 @@ func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x } // CHECK-LABEL: "default_dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { - // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = 
#vhlo.bool_v1, @@ -558,15 +600,16 @@ func.func @default_func(%arg0: tensor) -> tensor { // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : () -> () func.return %arg0 : tensor } // CHECK-LABEL: "dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { - // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -587,8 +630,9 @@ func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) } // CHECK-LABEL: "default_infeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.infeed_v1"(%arg0) <{ + // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ // CHECK-SAME: infeed_config = #vhlo.string_v1<"">, // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) @@ -597,8 +641,9 @@ func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.t } // CHECK-LABEL: "default_outfeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> // CHECK-SAME: }> : 
(!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token @@ -606,8 +651,9 @@ func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stab } // CHECK-LABEL: "default_recv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.recv_v1"(%arg0) <{ + // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -619,8 +665,9 @@ func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.tok } // CHECK-LABEL: "default_send" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -632,8 +679,9 @@ func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stableh } // CHECK-LABEL: "default_reduce_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> @@ -655,8 +703,9 @@ func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "default_reduce_window" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> 
tensor<2x16x30x7xf32> { - // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, @@ -678,8 +727,9 @@ func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { - // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, @@ -707,8 +757,9 @@ func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi3 } // CHECK-LABEL: "default_select_and_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { - // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> @@ -736,8 +787,9 @@ func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: ten } // CHECK-LABEL: "default_sort" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.sort_v1"(%arg0) <{ + // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> // CHECK-SAME: is_stable = #vhlo.bool_v1 // CHECK-SAME: }> ({ @@ -756,29 +808,33 @@ 
func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { // ============ OPS ============ // CHECK-LABEL: "op_abs" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_abs(%arg0: tensor) -> tensor { - // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_add" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_after_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 + // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token func.return %0 : !stablehlo.token } // CHECK-LABEL: "op_all_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.all_gather_v1"(%arg0) <{ + // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -794,8 +850,9 @@ func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_all_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_reduce(%arg0: tensor) -> tensor { - // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ + // CHECK: 
"vhlo.all_reduce_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 @@ -817,8 +874,9 @@ func.func @op_all_reduce(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_all_to_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { - // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ + // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, @@ -836,22 +894,25 @@ func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { } // CHECK-LABEL: "op_and" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_atan2" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_batch_norm_grad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: 
tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { - // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ + // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) @@ -863,8 +924,9 @@ func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf } // CHECK-LABEL: "op_batch_norm_inference" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { - // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ + // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> @@ -876,8 +938,9 @@ func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor } // CHECK-LABEL: "op_batch_norm_training" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_batch_norm_training(%arg0: 
tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { - // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) @@ -889,15 +952,17 @@ func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor< } // CHECK-LABEL: "op_bitcast_convert" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_bitcast_convert(%arg0: tensor) -> tensor { - // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ + // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> %0 = "stablehlo.broadcast_in_dim"(%arg0) { @@ -907,8 +972,9 @@ func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_broadcast" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.broadcast_v1"(%arg0) <{ + // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : 
tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> %0 = "stablehlo.broadcast"(%arg0) { @@ -918,9 +984,10 @@ func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_case" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.case_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.case"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> () @@ -929,22 +996,25 @@ func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_cbrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cbrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_ceil" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_ceil(%arg0: tensor) -> tensor { - // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_cholesky" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { - // CHECK: "vhlo.cholesky_v1"(%arg0) <{ + // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ // CHECK-SAME: lower = #vhlo.bool_v1 // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> %0 = "stablehlo.cholesky"(%arg0) { @@ -954,22 +1024,25 @@ func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { } 
// CHECK-LABEL: "op_clamp" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_count_leading_zeros" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { - // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_collective_permute" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { - // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ + // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> @@ -981,8 +1054,9 @@ func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { } // CHECK-LABEL: "op_compare" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: compare_type = #vhlo, // CHECK-SAME: comparison_direction = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -994,15 +1068,17 @@ 
func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_complex" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { - // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> + // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "op_concatenate" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.concatenate"(%arg0, %arg1) { @@ -1012,6 +1088,7 @@ func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor< } // CHECK-LABEL: "op_constant" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_constant(%arg0: tensor) -> tensor { // CHECK: "vhlo.constant_v1"() <{ // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> @@ -1023,15 +1100,17 @@ func.func @op_constant(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_convert" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_convert(%arg0: tensor) -> tensor { - // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_convolution" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { - // CHECK: 
"vhlo.convolution_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -1065,8 +1144,9 @@ func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16 } // CHECK-LABEL: "op_cosine" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cosine(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor func.return %0 : tensor } @@ -1079,8 +1159,9 @@ func.func @op_create_token() -> !stablehlo.token { } // CHECK-LABEL: "op_cross_replica_sum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ + // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cross-replica-sum"(%arg0) { @@ -1090,8 +1171,9 @@ func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_custom_call" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_custom_call(%arg0: tensor) -> tensor { - // CHECK: "vhlo.custom_call_v1"(%arg0) <{ + // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ // CHECK-SAME: api_version = #vhlo, // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, @@ -1122,15 +1204,17 @@ func.func @op_custom_call(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_divide" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, 
!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dot_general" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { - // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, @@ -1150,8 +1234,9 @@ func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) } // CHECK-LABEL: "op_dot" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.dot"(%arg0, %arg1) { @@ -1161,8 +1246,9 @@ func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x } // CHECK-LABEL: "op_dynamic_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: known_nonexpanding_dimensions 
= #vhlo.tensor_v1 : tensor<1xi64>> @@ -1176,8 +1262,9 @@ func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xi } // CHECK-LABEL: "op_dynamic_conv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<2x2xi32>) -> tensor<1x?x?x16xf32> { - // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -1210,8 +1297,9 @@ func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x1 } // CHECK-LABEL: "op_dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { - // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -1231,8 +1319,9 @@ func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32 } // CHECK-LABEL: "op_dynamic_iota" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ + // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_iota"(%arg0) { @@ -1242,22 +1331,25 @@ func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { } // CHECK-LABEL: 
"op_dynamic_pad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dynamic_reshape" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dynamic_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { - // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> 
%0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { @@ -1267,15 +1359,17 @@ func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor } // CHECK-LABEL: "op_dynamic_update_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> + // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> func.return %0 : tensor<16xf32> } // CHECK-LABEL: "op_einsum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.einsum"(%arg0, %arg1) { @@ -1285,22 +1379,25 @@ func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor } // CHECK-LABEL: "op_exponential_minus_one" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { - // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: 
"op_exponential" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_exponential(%arg0: tensor) -> tensor { - // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_fft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - // CHECK: "vhlo.fft_v1"(%arg0) <{ + // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: fft_type = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> @@ -1312,8 +1409,9 @@ func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { } // CHECK-LABEL: "op_floor" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_floor(%arg0: tensor) -> tensor { - // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor func.return %0 : tensor } @@ -1326,16 +1424,17 @@ func.func private @op_func(%arg0: tensor {stablehlo.arg = "0"}) -> (tensor< // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : () -> () func.return %arg0 : tensor } // CHECK-LABEL: "op_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { - // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ + // CHECK: 
"vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -1357,8 +1456,9 @@ func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> te } // CHECK-LABEL: "op_get_dimension_size" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_get_dimension_size(%arg0: tensor) -> tensor { - // CHECK: "vhlo.get_dimension_size_v1"(%arg0) <{ + // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.get_dimension_size"(%arg0) { @@ -1368,8 +1468,9 @@ func.func @op_get_dimension_size(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_get_tuple_element" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { - // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ + // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.get_tuple_element"(%arg0) { @@ -1379,11 +1480,12 @@ func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tenso } // CHECK-LABEL: "op_if" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.if_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }, { - // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.if"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> 
() @@ -1394,15 +1496,17 @@ func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> t } // CHECK-LABEL: "op_imag" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_imag(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_infeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.infeed_v1"(%arg0) <{ + // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) @@ -1425,36 +1529,41 @@ func.func @op_iota() -> tensor<16xf32> { } // CHECK-LABEL: "op_is_finite" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_is_finite(%arg0: tensor) -> tensor { - // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_log" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_log(%arg0: tensor) -> tensor { - // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_log_plus_one" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_log_plus_one(%arg0: tensor) -> tensor { - // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor func.return %0 : tensor 
} // CHECK-LABEL: "op_logistic" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_logistic(%arg0: tensor) -> tensor { - // CHECK: "vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_map" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.map_v1"(%arg0) <{ + // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> ({ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): @@ -1472,57 +1581,65 @@ func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_maximum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_minimum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_multiply" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: 
"vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_negate" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_negate(%arg0: tensor) -> tensor { - // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_not" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_not(%arg0: tensor) -> tensor { - // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_optimization_barrier" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_optimization_barrier(%arg0: tensor) -> tensor { - // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_or" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_outfeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ // 
CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.outfeed"(%arg0, %arg1) { @@ -1532,8 +1649,9 @@ func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo } // CHECK-LABEL: "op_pad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> @@ -1547,36 +1665,41 @@ func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { } // CHECK-LABEL: "op_popcnt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_popcnt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_power" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_real_dynamic_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, 
!vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_real" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_real(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_recv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.recv_v1"(%arg0) <{ + // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<3 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -1589,8 +1712,9 @@ func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { } // CHECK-LABEL: "op_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { - // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) + // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -1605,8 +1729,9 @@ func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_reduce_precision" +// CHECK-NEXT: (%[[ARG0:.*]]: 
{{.*}}) func.func @op_reduce_precision(%arg0: tensor) -> tensor { - // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -1618,8 +1743,9 @@ func.func @op_reduce_precision(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_reduce_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> @@ -1643,8 +1769,9 @@ func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_reduce_window" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { - // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, @@ -1670,8 +1797,9 @@ func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> } // CHECK-LABEL: "op_remainder" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) 
-> tensor func.return %0 : tensor } @@ -1691,16 +1819,18 @@ func.func @op_partition_id() -> tensor { } // CHECK-LABEL: "op_reshape" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { - // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> + // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> func.return %0 : tensor<4x4xf32> } // CHECK-LABEL: "op_return" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.case_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.case"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> () @@ -1709,8 +1839,9 @@ func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_reverse" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reverse_v1"(%arg0) <{ + // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.reverse"(%arg0) { @@ -1720,8 +1851,9 @@ func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_rng_bit_generator" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { - // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ + // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ // CHECK-SAME: rng_algorithm = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, 
!vhlo.tensor_v1) %0:2 = "stablehlo.rng_bit_generator"(%arg0) { @@ -1731,8 +1863,9 @@ func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor } // CHECK-LABEL: "op_rng" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: rng_distribution = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { @@ -1742,29 +1875,33 @@ func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex> } // CHECK-LABEL: "op_round_nearest_afz" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { - // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_round_nearest_even" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_round_nearest_even(%arg0: tensor) -> tensor { - // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_rsqrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_rsqrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, 
%[[ARG2:.*]]: {{.*}}) func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { - // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, @@ -1794,8 +1931,9 @@ func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, % } // CHECK-LABEL: "op_select_and_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { - // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> @@ -1825,15 +1963,17 @@ func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<1 } // CHECK-LABEL: "op_select" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_send" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - 
// CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -1846,8 +1986,9 @@ func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.to } // CHECK-LABEL: "op_set_dimension_size" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { @@ -1857,43 +1998,49 @@ func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> te } // CHECK-LABEL: "op_shift_left" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_shift_right_arithmetic" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_shift_right_logical" +// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_sign" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sign(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_sine" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sine(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { - // CHECK: "vhlo.slice_v1"(%arg0) <{ + // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> @@ -1907,8 +2054,9 @@ func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { } // CHECK-LABEL: "op_sort" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.sort_v1"(%arg0) <{ + // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: is_stable = #vhlo.bool_v1 // CHECK-SAME: }> ({ @@ -1928,29 +2076,33 @@ func.func @op_sort(%arg0: 
tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_sqrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sqrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_subtract" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_tanh" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_tanh(%arg0: tensor) -> tensor { - // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_torch_index_select" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { - // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> @@ -1962,8 +2114,9 @@ func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) } // CHECK-LABEL: "op_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { - // CHECK: 
"vhlo.transpose_v1"(%arg0) <{ + // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> %0 = "stablehlo.transpose"(%arg0) { @@ -1973,8 +2126,9 @@ func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { } // CHECK-LABEL: "op_triangular_solve" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: left_side = #vhlo.bool_v1, // CHECK-SAME: lower = #vhlo.bool_v1, // CHECK-SAME: transpose_a = #vhlo, @@ -1990,15 +2144,17 @@ func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32 } // CHECK-LABEL: "op_tuple" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_tuple(%arg0: tensor) -> tuple> { - // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> + // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> func.return %0 : tuple> } // CHECK-LABEL: "op_unary_einsum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { - // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ + // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> %0 = "stablehlo.unary_einsum"(%arg0) { @@ -2008,22 +2164,25 @@ func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { } // CHECK-LABEL: "op_uniform_dequantize" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> 
!vhlo.tensor_v1 + // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_uniform_quantize" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { - // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> + // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "op_while" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_while(%arg0: tensor) -> tensor { - // CHECK: "vhlo.while_v1"(%arg0) ({ + // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }, { @@ -2041,8 +2200,9 @@ func.func @op_while(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_xor" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } @@ -2050,190 +2210,217 @@ func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { // ============ TYPES ============ // CHECK-LABEL: "type_i1" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return 
%0 : tensor } // CHECK-LABEL: "type_i4" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i8" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> 
!vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui4" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui8" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: 
"type_ui64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3FN" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E5M2" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3B11FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3B11FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : 
(!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E5M2FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_bf16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor 
func.return %0 : tensor } // CHECK-LABEL: "type_f64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_complex_f32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "type_complex_f64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "type_dynamism_ranked" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { - // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_quantization" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, 
!vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> // CHECK-LABEL: "type_token_callee" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () + // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () return %arg0 : !stablehlo.token } // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> // CHECK-LABEL: "type_token_caller" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} + // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token return %0 : !stablehlo.token } // CHECK-LABEL: "type_tuple" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_tuple(%arg0: tuple>) -> tuple { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo" diff --git a/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_12_0.mlir b/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_12_0.mlir index 6eb60cde486..49b1d490d25 100644 --- a/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_12_0.mlir +++ b/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_12_0.mlir @@ -13,6 +13,7 @@ // ============ ATTRIBUTES ============ // CHECK-LABEL: "attr_comparison_direction_eq" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -22,6 +23,7 @@ func.func 
@attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_ne" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -31,6 +33,7 @@ func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_ge" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -40,6 +43,7 @@ func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_gt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -49,6 +53,7 @@ func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_le" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -58,6 +63,7 @@ func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_lt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -67,6 +73,7 @@ func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_type_notype" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func 
@attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo @@ -76,6 +83,7 @@ func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) - } // CHECK-LABEL: "attr_comparison_type_float" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -86,6 +94,7 @@ func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> } // CHECK-LABEL: "attr_comparison_type_totalorder" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -96,6 +105,7 @@ func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -106,6 +116,7 @@ func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) - } // CHECK-LABEL: "attr_comparison_type_unsigned" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -118,6 +129,7 @@ func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) // ConvDimensionNumbers aka #stablehlo.conv is covered below. 
// CHECK-LABEL: "attr_custom_call_api_version_unspecified" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -128,6 +140,7 @@ func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tenso } // CHECK-LABEL: "attr_custom_call_api_version_original" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -138,6 +151,7 @@ func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -148,6 +162,7 @@ func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> } // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -160,6 +175,7 @@ func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor>) -> tensor<16xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -170,6 +186,7 @@ func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomple } // CHECK-LABEL: "attr_fft_type_ifft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -180,6 +197,7 @@ func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcompl } // CHECK-LABEL: "attr_fft_type_rfft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -190,6 +208,7 @@ func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> 
tensor<9xcomplex> { } // CHECK-LABEL: "attr_fft_type_irfft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -202,6 +221,7 @@ func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> // GatherDimensionNumbers aka #stablehlo.gather is covered below. // CHECK-LABEL: "attr_precision_config_default" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -210,6 +230,7 @@ func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor< } // CHECK-LABEL: "attr_precision_config_high" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -219,6 +240,7 @@ func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x } // CHECK-LABEL: "attr_precision_config_highest" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -228,6 +250,7 @@ func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor< } // CHECK-LABEL: "attr_rng_algorithm_default" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -237,6 +260,7 @@ func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tenso } // CHECK-LABEL: 
"attr_rng_algorithm_three_fry" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -246,6 +270,7 @@ func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, ten } // CHECK-LABEL: "attr_rng_algorithm_philox" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -255,6 +280,7 @@ func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor } // CHECK-LABEL: "attr_rng_distribution_uniform" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { // CHECK: rng_distribution = #vhlo @@ -264,6 +290,7 @@ func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, } // CHECK-LABEL: "attr_rng_distribution_normal" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { // CHECK: rng_distribution = #vhlo @@ -275,6 +302,7 @@ func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. 
// CHECK-LABEL: "attr_transpose_no_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -287,6 +315,7 @@ func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<1 } // CHECK-LABEL: "attr_transpose_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -299,6 +328,7 @@ func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x1 } // CHECK-LABEL: "attr_transpose_adjoint" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -313,10 +343,9 @@ func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16x // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. 
// CHECK-LABEL: "attr_type_extensions_bounds" -func.func @attr_type_extensions_bounds( - %arg0: tensor>) - -> tensor> { - // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) +func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { + // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () func.return %arg0 : tensor> } @@ -324,8 +353,9 @@ func.func @attr_type_extensions_bounds( // ============ DEFAULTS ============ // CHECK-LABEL: "default_all_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.all_gather_v1"(%arg0) <{ + // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -339,8 +369,9 @@ func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "default_all_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_reduce(%arg0: tensor) -> tensor { - // CHECK: "vhlo.all_reduce_v1"(%arg0) + // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) // CHECK-SAME: <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -362,8 +393,9 @@ func.func @default_all_reduce(%arg0: tensor) -> tensor { } // CHECK-LABEL: "default_all_to_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { - // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ + // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, @@ -380,8 +412,9 @@ func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { 
} // CHECK-LABEL: "default_cholesky" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { - // CHECK: "vhlo.cholesky_v1"(%arg0) <{ + // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ // CHECK-SAME: lower = #vhlo.bool_v1 // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> @@ -389,8 +422,9 @@ func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { } // CHECK-LABEL: "default_collective_permute" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { - // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ + // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> @@ -401,8 +435,9 @@ func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf3 } // CHECK-LABEL: "default_compare" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: compare_type = #vhlo, // CHECK-SAME: comparison_direction = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -413,8 +448,9 @@ func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor } // CHECK-LABEL: "default_convolution" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { - // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.convolution_v1"(%[[ARG0]], 
%[[ARG1]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -442,8 +478,9 @@ func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x2 } // CHECK-LABEL: "default_custom_call" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_custom_call(%arg0: tensor) -> tensor { - // CHECK: "vhlo.custom_call_v1"(%arg0) <{ + // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ // CHECK-SAME: api_version = #vhlo, // CHECK-SAME: backend_config = #vhlo.string_v1<"">, // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, @@ -460,8 +497,9 @@ func.func @default_custom_call(%arg0: tensor) -> tensor { } // CHECK-LABEL: "default_dot_general" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { - // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, @@ -480,8 +518,9 @@ func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf } // CHECK-LABEL: "default_dot" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> @@ -489,8 +528,9 @@ func.func 
@default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tens } // CHECK-LABEL: "default_dynamic_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>> @@ -502,8 +542,9 @@ func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tenso } // CHECK-LABEL: "default_dynamic_conv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<2x2xi32>) -> tensor<1x?x?x16xf32> { - // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -531,8 +572,9 @@ func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x } // CHECK-LABEL: "default_dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { - // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = 
#vhlo.bool_v1, @@ -558,15 +600,16 @@ func.func @default_func(%arg0: tensor) -> tensor { // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : () -> () func.return %arg0 : tensor } // CHECK-LABEL: "dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { - // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -587,8 +630,9 @@ func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) } // CHECK-LABEL: "default_infeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.infeed_v1"(%arg0) <{ + // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ // CHECK-SAME: infeed_config = #vhlo.string_v1<"">, // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) @@ -597,8 +641,9 @@ func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.t } // CHECK-LABEL: "default_outfeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> // CHECK-SAME: }> : 
(!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token @@ -606,8 +651,9 @@ func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stab } // CHECK-LABEL: "default_recv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.recv_v1"(%arg0) <{ + // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -619,8 +665,9 @@ func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.tok } // CHECK-LABEL: "default_send" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -632,8 +679,9 @@ func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stableh } // CHECK-LABEL: "default_reduce_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> @@ -655,8 +703,9 @@ func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "default_reduce_window" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> 
tensor<2x16x30x7xf32> { - // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, @@ -678,8 +727,9 @@ func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { - // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, @@ -707,8 +757,9 @@ func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi3 } // CHECK-LABEL: "default_select_and_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { - // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> @@ -736,8 +787,9 @@ func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: ten } // CHECK-LABEL: "default_sort" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.sort_v1"(%arg0) <{ + // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> // CHECK-SAME: is_stable = #vhlo.bool_v1 // CHECK-SAME: }> ({ @@ -756,29 +808,33 @@ 
func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { // ============ OPS ============ // CHECK-LABEL: "op_abs" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_abs(%arg0: tensor) -> tensor { - // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_add" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_after_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 + // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token func.return %0 : !stablehlo.token } // CHECK-LABEL: "op_all_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.all_gather_v1"(%arg0) <{ + // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -794,8 +850,9 @@ func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_all_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_reduce(%arg0: tensor) -> tensor { - // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ + // CHECK: 
"vhlo.all_reduce_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 @@ -817,8 +874,9 @@ func.func @op_all_reduce(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_all_to_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { - // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ + // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, @@ -836,22 +894,25 @@ func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { } // CHECK-LABEL: "op_and" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_atan2" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_batch_norm_grad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: 
tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { - // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ + // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) @@ -863,8 +924,9 @@ func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf } // CHECK-LABEL: "op_batch_norm_inference" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { - // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ + // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> @@ -876,8 +938,9 @@ func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor } // CHECK-LABEL: "op_batch_norm_training" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_batch_norm_training(%arg0: 
tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { - // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) @@ -889,15 +952,17 @@ func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor< } // CHECK-LABEL: "op_bitcast_convert" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_bitcast_convert(%arg0: tensor) -> tensor { - // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ + // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> %0 = "stablehlo.broadcast_in_dim"(%arg0) { @@ -907,8 +972,9 @@ func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_broadcast" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.broadcast_v1"(%arg0) <{ + // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : 
tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> %0 = "stablehlo.broadcast"(%arg0) { @@ -918,9 +984,10 @@ func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_case" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.case_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.case"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> () @@ -929,22 +996,25 @@ func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_cbrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cbrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_ceil" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_ceil(%arg0: tensor) -> tensor { - // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_cholesky" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { - // CHECK: "vhlo.cholesky_v1"(%arg0) <{ + // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ // CHECK-SAME: lower = #vhlo.bool_v1 // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> %0 = "stablehlo.cholesky"(%arg0) { @@ -954,22 +1024,25 @@ func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { } 
// CHECK-LABEL: "op_clamp" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_count_leading_zeros" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { - // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_collective_permute" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { - // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ + // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> @@ -981,8 +1054,9 @@ func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { } // CHECK-LABEL: "op_compare" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: compare_type = #vhlo, // CHECK-SAME: comparison_direction = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -994,15 +1068,17 @@ 
func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_complex" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { - // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> + // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "op_concatenate" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.concatenate"(%arg0, %arg1) { @@ -1012,6 +1088,7 @@ func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor< } // CHECK-LABEL: "op_constant" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_constant(%arg0: tensor) -> tensor { // CHECK: "vhlo.constant_v1"() <{ // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> @@ -1023,15 +1100,17 @@ func.func @op_constant(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_convert" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_convert(%arg0: tensor) -> tensor { - // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_convolution" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { - // CHECK: 
"vhlo.convolution_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -1065,8 +1144,9 @@ func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16 } // CHECK-LABEL: "op_cosine" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cosine(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor func.return %0 : tensor } @@ -1079,8 +1159,9 @@ func.func @op_create_token() -> !stablehlo.token { } // CHECK-LABEL: "op_cross_replica_sum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ + // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cross-replica-sum"(%arg0) { @@ -1090,8 +1171,9 @@ func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_custom_call" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_custom_call(%arg0: tensor) -> tensor { - // CHECK: "vhlo.custom_call_v1"(%arg0) <{ + // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ // CHECK-SAME: api_version = #vhlo, // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, @@ -1122,15 +1204,17 @@ func.func @op_custom_call(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_divide" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, 
!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dot_general" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { - // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, @@ -1150,8 +1234,9 @@ func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) } // CHECK-LABEL: "op_dot" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.dot"(%arg0, %arg1) { @@ -1161,8 +1246,9 @@ func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x } // CHECK-LABEL: "op_dynamic_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: known_nonexpanding_dimensions 
= #vhlo.tensor_v1 : tensor<1xi64>> @@ -1176,8 +1262,9 @@ func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xi } // CHECK-LABEL: "op_dynamic_conv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<2x2xi32>) -> tensor<1x?x?x16xf32> { - // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -1210,8 +1297,9 @@ func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x1 } // CHECK-LABEL: "op_dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { - // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -1231,8 +1319,9 @@ func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32 } // CHECK-LABEL: "op_dynamic_iota" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ + // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_iota"(%arg0) { @@ -1242,22 +1331,25 @@ func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { } // CHECK-LABEL: 
"op_dynamic_pad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dynamic_reshape" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dynamic_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { - // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> 
%0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { @@ -1267,15 +1359,17 @@ func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor } // CHECK-LABEL: "op_dynamic_update_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> + // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> func.return %0 : tensor<16xf32> } // CHECK-LABEL: "op_einsum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.einsum"(%arg0, %arg1) { @@ -1285,22 +1379,25 @@ func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor } // CHECK-LABEL: "op_exponential_minus_one" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { - // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: 
"op_exponential" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_exponential(%arg0: tensor) -> tensor { - // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_fft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - // CHECK: "vhlo.fft_v1"(%arg0) <{ + // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: fft_type = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> @@ -1312,8 +1409,9 @@ func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { } // CHECK-LABEL: "op_floor" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_floor(%arg0: tensor) -> tensor { - // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor func.return %0 : tensor } @@ -1326,16 +1424,17 @@ func.func private @op_func(%arg0: tensor {stablehlo.arg = "0"}) -> (tensor< // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : () -> () func.return %arg0 : tensor } // CHECK-LABEL: "op_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { - // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ + // CHECK: 
"vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -1357,8 +1456,9 @@ func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> te } // CHECK-LABEL: "op_get_dimension_size" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_get_dimension_size(%arg0: tensor) -> tensor { - // CHECK: "vhlo.get_dimension_size_v1"(%arg0) <{ + // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.get_dimension_size"(%arg0) { @@ -1368,8 +1468,9 @@ func.func @op_get_dimension_size(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_get_tuple_element" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { - // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ + // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.get_tuple_element"(%arg0) { @@ -1379,11 +1480,12 @@ func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tenso } // CHECK-LABEL: "op_if" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.if_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }, { - // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.if"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> 
() @@ -1394,15 +1496,17 @@ func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> t } // CHECK-LABEL: "op_imag" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_imag(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_infeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.infeed_v1"(%arg0) <{ + // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) @@ -1425,36 +1529,41 @@ func.func @op_iota() -> tensor<16xf32> { } // CHECK-LABEL: "op_is_finite" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_is_finite(%arg0: tensor) -> tensor { - // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_log" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_log(%arg0: tensor) -> tensor { - // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_log_plus_one" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_log_plus_one(%arg0: tensor) -> tensor { - // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor func.return %0 : tensor 
} // CHECK-LABEL: "op_logistic" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_logistic(%arg0: tensor) -> tensor { - // CHECK: "vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_map" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.map_v1"(%arg0) <{ + // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> ({ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): @@ -1472,57 +1581,65 @@ func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_maximum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_minimum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_multiply" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: 
"vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_negate" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_negate(%arg0: tensor) -> tensor { - // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_not" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_not(%arg0: tensor) -> tensor { - // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_optimization_barrier" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_optimization_barrier(%arg0: tensor) -> tensor { - // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_or" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_outfeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ // 
CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.outfeed"(%arg0, %arg1) { @@ -1532,8 +1649,9 @@ func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo } // CHECK-LABEL: "op_pad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> @@ -1547,36 +1665,41 @@ func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { } // CHECK-LABEL: "op_popcnt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_popcnt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_power" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_real_dynamic_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, 
!vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_real" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_real(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_recv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.recv_v1"(%arg0) <{ + // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<3 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -1589,8 +1712,9 @@ func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { } // CHECK-LABEL: "op_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { - // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) + // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -1605,8 +1729,9 @@ func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_reduce_precision" +// CHECK-NEXT: (%[[ARG0:.*]]: 
{{.*}}) func.func @op_reduce_precision(%arg0: tensor) -> tensor { - // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -1618,8 +1743,9 @@ func.func @op_reduce_precision(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_reduce_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> @@ -1643,8 +1769,9 @@ func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_reduce_window" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { - // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, @@ -1670,8 +1797,9 @@ func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> } // CHECK-LABEL: "op_remainder" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) 
-> tensor func.return %0 : tensor } @@ -1691,16 +1819,18 @@ func.func @op_partition_id() -> tensor { } // CHECK-LABEL: "op_reshape" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { - // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> + // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> func.return %0 : tensor<4x4xf32> } // CHECK-LABEL: "op_return" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.case_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.case"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> () @@ -1709,8 +1839,9 @@ func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_reverse" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reverse_v1"(%arg0) <{ + // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.reverse"(%arg0) { @@ -1720,8 +1851,9 @@ func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_rng_bit_generator" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { - // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ + // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ // CHECK-SAME: rng_algorithm = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, 
!vhlo.tensor_v1) %0:2 = "stablehlo.rng_bit_generator"(%arg0) { @@ -1731,8 +1863,9 @@ func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor } // CHECK-LABEL: "op_rng" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: rng_distribution = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { @@ -1742,29 +1875,33 @@ func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex> } // CHECK-LABEL: "op_round_nearest_afz" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { - // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_round_nearest_even" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_round_nearest_even(%arg0: tensor) -> tensor { - // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_rsqrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_rsqrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, 
%[[ARG2:.*]]: {{.*}}) func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { - // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, @@ -1794,8 +1931,9 @@ func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, % } // CHECK-LABEL: "op_select_and_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { - // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> @@ -1825,15 +1963,17 @@ func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<1 } // CHECK-LABEL: "op_select" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_send" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - 
// CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -1846,8 +1986,9 @@ func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.to } // CHECK-LABEL: "op_set_dimension_size" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { @@ -1857,43 +1998,49 @@ func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> te } // CHECK-LABEL: "op_shift_left" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_shift_right_arithmetic" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_shift_right_logical" +// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_sign" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sign(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_sine" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sine(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { - // CHECK: "vhlo.slice_v1"(%arg0) <{ + // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> @@ -1907,8 +2054,9 @@ func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { } // CHECK-LABEL: "op_sort" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.sort_v1"(%arg0) <{ + // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: is_stable = #vhlo.bool_v1 // CHECK-SAME: }> ({ @@ -1928,29 +2076,33 @@ func.func @op_sort(%arg0: 
tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_sqrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sqrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_subtract" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_tanh" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_tanh(%arg0: tensor) -> tensor { - // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_torch_index_select" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { - // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> @@ -1962,8 +2114,9 @@ func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) } // CHECK-LABEL: "op_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { - // CHECK: 
"vhlo.transpose_v1"(%arg0) <{ + // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> %0 = "stablehlo.transpose"(%arg0) { @@ -1973,8 +2126,9 @@ func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { } // CHECK-LABEL: "op_triangular_solve" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: left_side = #vhlo.bool_v1, // CHECK-SAME: lower = #vhlo.bool_v1, // CHECK-SAME: transpose_a = #vhlo, @@ -1990,15 +2144,17 @@ func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32 } // CHECK-LABEL: "op_tuple" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_tuple(%arg0: tensor) -> tuple> { - // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> + // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> func.return %0 : tuple> } // CHECK-LABEL: "op_unary_einsum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { - // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ + // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> %0 = "stablehlo.unary_einsum"(%arg0) { @@ -2008,22 +2164,25 @@ func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { } // CHECK-LABEL: "op_uniform_dequantize" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> 
!vhlo.tensor_v1 + // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_uniform_quantize" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { - // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> + // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "op_while" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_while(%arg0: tensor) -> tensor { - // CHECK: "vhlo.while_v1"(%arg0) ({ + // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }, { @@ -2041,8 +2200,9 @@ func.func @op_while(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_xor" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } @@ -2050,190 +2210,217 @@ func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { // ============ TYPES ============ // CHECK-LABEL: "type_i1" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return 
%0 : tensor } // CHECK-LABEL: "type_i4" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i8" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> 
!vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui4" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui8" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: 
"type_ui64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3FN" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E5M2" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3B11FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3B11FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : 
(!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E5M2FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_bf16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor 
func.return %0 : tensor } // CHECK-LABEL: "type_f64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_complex_f32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "type_complex_f64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "type_dynamism_ranked" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { - // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_quantization" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, 
!vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> // CHECK-LABEL: "type_token_callee" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () + // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () return %arg0 : !stablehlo.token } // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> // CHECK-LABEL: "type_token_caller" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} + // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token return %0 : !stablehlo.token } // CHECK-LABEL: "type_tuple" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_tuple(%arg0: tuple>) -> tuple { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo" diff --git a/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_13_0.mlir b/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_13_0.mlir index a7670a687d3..ae049679833 100644 --- a/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_13_0.mlir +++ b/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_13_0.mlir @@ -13,6 +13,7 @@ // ============ ATTRIBUTES ============ // CHECK-LABEL: "attr_comparison_direction_eq" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -22,6 +23,7 @@ func.func 
@attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_ne" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -31,6 +33,7 @@ func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_ge" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -40,6 +43,7 @@ func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_gt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -49,6 +53,7 @@ func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_le" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -58,6 +63,7 @@ func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_lt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -67,6 +73,7 @@ func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_type_notype" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func 
@attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo @@ -76,6 +83,7 @@ func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) - } // CHECK-LABEL: "attr_comparison_type_float" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -86,6 +94,7 @@ func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> } // CHECK-LABEL: "attr_comparison_type_totalorder" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -96,6 +105,7 @@ func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -106,6 +116,7 @@ func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) - } // CHECK-LABEL: "attr_comparison_type_unsigned" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -118,6 +129,7 @@ func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) // ConvDimensionNumbers aka #stablehlo.conv is covered below. 
// CHECK-LABEL: "attr_custom_call_api_version_unspecified" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -128,6 +140,7 @@ func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tenso } // CHECK-LABEL: "attr_custom_call_api_version_original" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -138,6 +151,7 @@ func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -148,6 +162,7 @@ func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> } // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -160,6 +175,7 @@ func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor>) -> tensor<16xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -170,6 +186,7 @@ func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomple } // CHECK-LABEL: "attr_fft_type_ifft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -180,6 +197,7 @@ func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcompl } // CHECK-LABEL: "attr_fft_type_rfft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -190,6 +208,7 @@ func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> 
tensor<9xcomplex> { } // CHECK-LABEL: "attr_fft_type_irfft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -202,6 +221,7 @@ func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> // GatherDimensionNumbers aka #stablehlo.gather is covered below. // CHECK-LABEL: "attr_precision_config_default" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -210,6 +230,7 @@ func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor< } // CHECK-LABEL: "attr_precision_config_high" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -219,6 +240,7 @@ func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x } // CHECK-LABEL: "attr_precision_config_highest" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -228,6 +250,7 @@ func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor< } // CHECK-LABEL: "attr_rng_algorithm_default" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -237,6 +260,7 @@ func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tenso } // CHECK-LABEL: 
"attr_rng_algorithm_three_fry" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -246,6 +270,7 @@ func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, ten } // CHECK-LABEL: "attr_rng_algorithm_philox" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -255,6 +280,7 @@ func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor } // CHECK-LABEL: "attr_rng_distribution_uniform" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { // CHECK: rng_distribution = #vhlo @@ -264,6 +290,7 @@ func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, } // CHECK-LABEL: "attr_rng_distribution_normal" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { // CHECK: rng_distribution = #vhlo @@ -275,6 +302,7 @@ func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. 
// CHECK-LABEL: "attr_transpose_no_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -287,6 +315,7 @@ func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<1 } // CHECK-LABEL: "attr_transpose_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -299,6 +328,7 @@ func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x1 } // CHECK-LABEL: "attr_transpose_adjoint" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -313,10 +343,9 @@ func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16x // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. 
// CHECK-LABEL: "attr_type_extensions_bounds" -func.func @attr_type_extensions_bounds( - %arg0: tensor>) - -> tensor> { - // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) +func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { + // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () func.return %arg0 : tensor> } @@ -324,8 +353,9 @@ func.func @attr_type_extensions_bounds( // ============ DEFAULTS ============ // CHECK-LABEL: "default_all_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.all_gather_v1"(%arg0) <{ + // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -339,8 +369,9 @@ func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "default_all_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_reduce(%arg0: tensor) -> tensor { - // CHECK: "vhlo.all_reduce_v1"(%arg0) + // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) // CHECK-SAME: <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -362,8 +393,9 @@ func.func @default_all_reduce(%arg0: tensor) -> tensor { } // CHECK-LABEL: "default_all_to_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { - // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ + // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, @@ -380,8 +412,9 @@ func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { 
} // CHECK-LABEL: "default_cholesky" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { - // CHECK: "vhlo.cholesky_v1"(%arg0) <{ + // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ // CHECK-SAME: lower = #vhlo.bool_v1 // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> @@ -389,8 +422,9 @@ func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { } // CHECK-LABEL: "default_collective_permute" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { - // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ + // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> @@ -401,8 +435,9 @@ func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf3 } // CHECK-LABEL: "default_compare" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: compare_type = #vhlo, // CHECK-SAME: comparison_direction = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -413,8 +448,9 @@ func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor } // CHECK-LABEL: "default_convolution" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { - // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.convolution_v1"(%[[ARG0]], 
%[[ARG1]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -442,8 +478,9 @@ func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x2 } // CHECK-LABEL: "default_custom_call" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_custom_call(%arg0: tensor) -> tensor { - // CHECK: "vhlo.custom_call_v1"(%arg0) <{ + // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ // CHECK-SAME: api_version = #vhlo, // CHECK-SAME: backend_config = #vhlo.string_v1<"">, // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, @@ -460,8 +497,9 @@ func.func @default_custom_call(%arg0: tensor) -> tensor { } // CHECK-LABEL: "default_dot_general" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { - // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, @@ -480,8 +518,9 @@ func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf } // CHECK-LABEL: "default_dot" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> @@ -489,8 +528,9 @@ func.func 
@default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tens } // CHECK-LABEL: "default_dynamic_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>> @@ -502,8 +542,9 @@ func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tenso } // CHECK-LABEL: "default_dynamic_conv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<2x2xi32>) -> tensor<1x?x?x16xf32> { - // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -531,8 +572,9 @@ func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x } // CHECK-LABEL: "default_dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { - // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = 
#vhlo.bool_v1, @@ -558,15 +600,16 @@ func.func @default_func(%arg0: tensor) -> tensor { // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : () -> () func.return %arg0 : tensor } // CHECK-LABEL: "dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { - // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -587,8 +630,9 @@ func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) } // CHECK-LABEL: "default_infeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.infeed_v1"(%arg0) <{ + // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ // CHECK-SAME: infeed_config = #vhlo.string_v1<"">, // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) @@ -597,8 +641,9 @@ func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.t } // CHECK-LABEL: "default_outfeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> // CHECK-SAME: }> : 
(!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token @@ -606,8 +651,9 @@ func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stab } // CHECK-LABEL: "default_recv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.recv_v1"(%arg0) <{ + // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -619,8 +665,9 @@ func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.tok } // CHECK-LABEL: "default_send" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -632,8 +679,9 @@ func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stableh } // CHECK-LABEL: "default_reduce_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> @@ -655,8 +703,9 @@ func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "default_reduce_window" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> 
tensor<2x16x30x7xf32> { - // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, @@ -678,8 +727,9 @@ func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { - // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, @@ -707,8 +757,9 @@ func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi3 } // CHECK-LABEL: "default_select_and_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { - // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> @@ -736,8 +787,9 @@ func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: ten } // CHECK-LABEL: "default_sort" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.sort_v1"(%arg0) <{ + // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> // CHECK-SAME: is_stable = #vhlo.bool_v1 // CHECK-SAME: }> ({ @@ -756,29 +808,33 @@ 
func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { // ============ OPS ============ // CHECK-LABEL: "op_abs" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_abs(%arg0: tensor) -> tensor { - // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_add" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_after_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 + // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token func.return %0 : !stablehlo.token } // CHECK-LABEL: "op_all_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.all_gather_v1"(%arg0) <{ + // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -794,8 +850,9 @@ func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_all_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_reduce(%arg0: tensor) -> tensor { - // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ + // CHECK: 
"vhlo.all_reduce_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 @@ -817,8 +874,9 @@ func.func @op_all_reduce(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_all_to_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { - // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ + // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, @@ -836,22 +894,25 @@ func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { } // CHECK-LABEL: "op_and" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_atan2" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_batch_norm_grad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: 
tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { - // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ + // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) @@ -863,8 +924,9 @@ func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf } // CHECK-LABEL: "op_batch_norm_inference" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { - // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ + // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> @@ -876,8 +938,9 @@ func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor } // CHECK-LABEL: "op_batch_norm_training" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_batch_norm_training(%arg0: 
tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { - // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) @@ -889,15 +952,17 @@ func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor< } // CHECK-LABEL: "op_bitcast_convert" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_bitcast_convert(%arg0: tensor) -> tensor { - // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ + // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> %0 = "stablehlo.broadcast_in_dim"(%arg0) { @@ -907,8 +972,9 @@ func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_broadcast" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.broadcast_v1"(%arg0) <{ + // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : 
tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> %0 = "stablehlo.broadcast"(%arg0) { @@ -918,9 +984,10 @@ func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_case" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.case_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.case"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> () @@ -929,22 +996,25 @@ func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_cbrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cbrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_ceil" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_ceil(%arg0: tensor) -> tensor { - // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_cholesky" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { - // CHECK: "vhlo.cholesky_v1"(%arg0) <{ + // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ // CHECK-SAME: lower = #vhlo.bool_v1 // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> %0 = "stablehlo.cholesky"(%arg0) { @@ -954,22 +1024,25 @@ func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { } 
// CHECK-LABEL: "op_clamp" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_count_leading_zeros" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { - // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_collective_permute" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { - // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ + // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> @@ -981,8 +1054,9 @@ func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { } // CHECK-LABEL: "op_compare" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: compare_type = #vhlo, // CHECK-SAME: comparison_direction = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -994,15 +1068,17 @@ 
func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_complex" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { - // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> + // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "op_concatenate" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.concatenate"(%arg0, %arg1) { @@ -1012,6 +1088,7 @@ func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor< } // CHECK-LABEL: "op_constant" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_constant(%arg0: tensor) -> tensor { // CHECK: "vhlo.constant_v1"() <{ // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> @@ -1023,15 +1100,17 @@ func.func @op_constant(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_convert" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_convert(%arg0: tensor) -> tensor { - // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_convolution" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { - // CHECK: 
"vhlo.convolution_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -1065,8 +1144,9 @@ func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16 } // CHECK-LABEL: "op_cosine" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cosine(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor func.return %0 : tensor } @@ -1079,8 +1159,9 @@ func.func @op_create_token() -> !stablehlo.token { } // CHECK-LABEL: "op_cross_replica_sum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ + // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cross-replica-sum"(%arg0) { @@ -1090,8 +1171,9 @@ func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_custom_call" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_custom_call(%arg0: tensor) -> tensor { - // CHECK: "vhlo.custom_call_v1"(%arg0) <{ + // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ // CHECK-SAME: api_version = #vhlo, // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, @@ -1122,15 +1204,17 @@ func.func @op_custom_call(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_divide" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, 
!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dot_general" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { - // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, @@ -1150,8 +1234,9 @@ func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) } // CHECK-LABEL: "op_dot" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.dot"(%arg0, %arg1) { @@ -1161,8 +1246,9 @@ func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x } // CHECK-LABEL: "op_dynamic_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: known_nonexpanding_dimensions 
= #vhlo.tensor_v1 : tensor<1xi64>> @@ -1176,8 +1262,9 @@ func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xi } // CHECK-LABEL: "op_dynamic_conv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<2x2xi32>) -> tensor<1x?x?x16xf32> { - // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -1210,8 +1297,9 @@ func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x1 } // CHECK-LABEL: "op_dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { - // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -1231,8 +1319,9 @@ func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32 } // CHECK-LABEL: "op_dynamic_iota" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ + // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_iota"(%arg0) { @@ -1242,22 +1331,25 @@ func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { } // CHECK-LABEL: 
"op_dynamic_pad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dynamic_reshape" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dynamic_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { - // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> 
%0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { @@ -1267,15 +1359,17 @@ func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor } // CHECK-LABEL: "op_dynamic_update_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> + // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> func.return %0 : tensor<16xf32> } // CHECK-LABEL: "op_einsum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.einsum"(%arg0, %arg1) { @@ -1285,22 +1379,25 @@ func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor } // CHECK-LABEL: "op_exponential_minus_one" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { - // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: 
"op_exponential" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_exponential(%arg0: tensor) -> tensor { - // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_fft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - // CHECK: "vhlo.fft_v1"(%arg0) <{ + // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: fft_type = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> @@ -1312,8 +1409,9 @@ func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { } // CHECK-LABEL: "op_floor" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_floor(%arg0: tensor) -> tensor { - // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor func.return %0 : tensor } @@ -1326,16 +1424,17 @@ func.func private @op_func(%arg0: tensor {stablehlo.arg = "0"}) -> (tensor< // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : () -> () func.return %arg0 : tensor } // CHECK-LABEL: "op_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { - // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ + // CHECK: 
"vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -1357,8 +1456,9 @@ func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> te } // CHECK-LABEL: "op_get_dimension_size" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_get_dimension_size(%arg0: tensor) -> tensor { - // CHECK: "vhlo.get_dimension_size_v1"(%arg0) <{ + // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.get_dimension_size"(%arg0) { @@ -1368,8 +1468,9 @@ func.func @op_get_dimension_size(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_get_tuple_element" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { - // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ + // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.get_tuple_element"(%arg0) { @@ -1379,11 +1480,12 @@ func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tenso } // CHECK-LABEL: "op_if" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.if_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }, { - // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.if"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> 
() @@ -1394,15 +1496,17 @@ func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> t } // CHECK-LABEL: "op_imag" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_imag(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_infeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.infeed_v1"(%arg0) <{ + // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) @@ -1425,36 +1529,41 @@ func.func @op_iota() -> tensor<16xf32> { } // CHECK-LABEL: "op_is_finite" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_is_finite(%arg0: tensor) -> tensor { - // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_log" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_log(%arg0: tensor) -> tensor { - // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_log_plus_one" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_log_plus_one(%arg0: tensor) -> tensor { - // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor func.return %0 : tensor 
} // CHECK-LABEL: "op_logistic" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_logistic(%arg0: tensor) -> tensor { - // CHECK: "vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_map" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.map_v1"(%arg0) <{ + // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> ({ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): @@ -1472,57 +1581,65 @@ func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_maximum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_minimum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_multiply" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: 
"vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_negate" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_negate(%arg0: tensor) -> tensor { - // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_not" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_not(%arg0: tensor) -> tensor { - // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_optimization_barrier" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_optimization_barrier(%arg0: tensor) -> tensor { - // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_or" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_outfeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ // 
CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.outfeed"(%arg0, %arg1) { @@ -1532,8 +1649,9 @@ func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo } // CHECK-LABEL: "op_pad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> @@ -1547,36 +1665,41 @@ func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { } // CHECK-LABEL: "op_popcnt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_popcnt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_power" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_real_dynamic_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, 
!vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_real" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_real(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_recv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.recv_v1"(%arg0) <{ + // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<3 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -1589,8 +1712,9 @@ func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { } // CHECK-LABEL: "op_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { - // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) + // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -1605,8 +1729,9 @@ func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_reduce_precision" +// CHECK-NEXT: (%[[ARG0:.*]]: 
{{.*}}) func.func @op_reduce_precision(%arg0: tensor) -> tensor { - // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -1618,8 +1743,9 @@ func.func @op_reduce_precision(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_reduce_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> @@ -1643,8 +1769,9 @@ func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_reduce_window" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { - // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, @@ -1670,8 +1797,9 @@ func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> } // CHECK-LABEL: "op_remainder" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) 
-> tensor func.return %0 : tensor } @@ -1691,16 +1819,18 @@ func.func @op_partition_id() -> tensor { } // CHECK-LABEL: "op_reshape" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { - // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> + // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> func.return %0 : tensor<4x4xf32> } // CHECK-LABEL: "op_return" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.case_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.case"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> () @@ -1709,8 +1839,9 @@ func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_reverse" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reverse_v1"(%arg0) <{ + // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.reverse"(%arg0) { @@ -1720,8 +1851,9 @@ func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_rng_bit_generator" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { - // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ + // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ // CHECK-SAME: rng_algorithm = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, 
!vhlo.tensor_v1) %0:2 = "stablehlo.rng_bit_generator"(%arg0) { @@ -1731,8 +1863,9 @@ func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor } // CHECK-LABEL: "op_rng" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: rng_distribution = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { @@ -1742,29 +1875,33 @@ func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex> } // CHECK-LABEL: "op_round_nearest_afz" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { - // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_round_nearest_even" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_round_nearest_even(%arg0: tensor) -> tensor { - // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_rsqrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_rsqrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, 
%[[ARG2:.*]]: {{.*}}) func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { - // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, @@ -1794,8 +1931,9 @@ func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, % } // CHECK-LABEL: "op_select_and_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { - // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> @@ -1825,15 +1963,17 @@ func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<1 } // CHECK-LABEL: "op_select" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_send" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - 
// CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -1846,8 +1986,9 @@ func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.to } // CHECK-LABEL: "op_set_dimension_size" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { @@ -1857,43 +1998,49 @@ func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> te } // CHECK-LABEL: "op_shift_left" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_shift_right_arithmetic" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_shift_right_logical" +// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_sign" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sign(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_sine" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sine(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { - // CHECK: "vhlo.slice_v1"(%arg0) <{ + // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> @@ -1907,8 +2054,9 @@ func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { } // CHECK-LABEL: "op_sort" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.sort_v1"(%arg0) <{ + // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: is_stable = #vhlo.bool_v1 // CHECK-SAME: }> ({ @@ -1928,29 +2076,33 @@ func.func @op_sort(%arg0: 
tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_sqrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sqrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_subtract" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_tanh" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_tanh(%arg0: tensor) -> tensor { - // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_torch_index_select" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { - // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> @@ -1962,8 +2114,9 @@ func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) } // CHECK-LABEL: "op_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { - // CHECK: 
"vhlo.transpose_v1"(%arg0) <{ + // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> %0 = "stablehlo.transpose"(%arg0) { @@ -1973,8 +2126,9 @@ func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { } // CHECK-LABEL: "op_triangular_solve" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: left_side = #vhlo.bool_v1, // CHECK-SAME: lower = #vhlo.bool_v1, // CHECK-SAME: transpose_a = #vhlo, @@ -1990,15 +2144,17 @@ func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32 } // CHECK-LABEL: "op_tuple" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_tuple(%arg0: tensor) -> tuple> { - // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> + // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> func.return %0 : tuple> } // CHECK-LABEL: "op_unary_einsum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { - // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ + // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> %0 = "stablehlo.unary_einsum"(%arg0) { @@ -2008,22 +2164,25 @@ func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { } // CHECK-LABEL: "op_uniform_dequantize" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> 
!vhlo.tensor_v1 + // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_uniform_quantize" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { - // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> + // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "op_while" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_while(%arg0: tensor) -> tensor { - // CHECK: "vhlo.while_v1"(%arg0) ({ + // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }, { @@ -2041,8 +2200,9 @@ func.func @op_while(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_xor" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } @@ -2050,190 +2210,217 @@ func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { // ============ TYPES ============ // CHECK-LABEL: "type_i1" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return 
%0 : tensor } // CHECK-LABEL: "type_i4" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i8" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> 
!vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui4" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui8" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: 
"type_ui64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3FN" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E5M2" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3B11FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3B11FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : 
(!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E5M2FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_bf16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor 
func.return %0 : tensor } // CHECK-LABEL: "type_f64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_complex_f32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "type_complex_f64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "type_dynamism_ranked" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { - // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_quantization" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, 
!vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> // CHECK-LABEL: "type_token_callee" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () + // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () return %arg0 : !stablehlo.token } // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> // CHECK-LABEL: "type_token_caller" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} + // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token return %0 : !stablehlo.token } // CHECK-LABEL: "type_tuple" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_tuple(%arg0: tuple>) -> tuple { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo" diff --git a/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_14_0.mlir b/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_14_0.mlir index f1053d3af42..155bea97ae6 100644 --- a/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_14_0.mlir +++ b/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_14_0.mlir @@ -13,6 +13,7 @@ // ============ ATTRIBUTES ============ // CHECK-LABEL: "attr_comparison_direction_eq" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -22,6 +23,7 @@ func.func 
@attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_ne" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -31,6 +33,7 @@ func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_ge" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -40,6 +43,7 @@ func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_gt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -49,6 +53,7 @@ func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_le" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -58,6 +63,7 @@ func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_lt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -67,6 +73,7 @@ func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_type_notype" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func 
@attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo @@ -76,6 +83,7 @@ func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) - } // CHECK-LABEL: "attr_comparison_type_float" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -86,6 +94,7 @@ func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> } // CHECK-LABEL: "attr_comparison_type_totalorder" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -96,6 +105,7 @@ func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -106,6 +116,7 @@ func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) - } // CHECK-LABEL: "attr_comparison_type_unsigned" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -118,6 +129,7 @@ func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) // ConvDimensionNumbers aka #stablehlo.conv is covered below. 
// CHECK-LABEL: "attr_custom_call_api_version_unspecified" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -128,6 +140,7 @@ func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tenso } // CHECK-LABEL: "attr_custom_call_api_version_original" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -138,6 +151,7 @@ func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -148,6 +162,7 @@ func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> } // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -160,6 +175,7 @@ func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor>) -> tensor<16xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -170,6 +186,7 @@ func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomple } // CHECK-LABEL: "attr_fft_type_ifft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -180,6 +197,7 @@ func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcompl } // CHECK-LABEL: "attr_fft_type_rfft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -190,6 +208,7 @@ func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> 
tensor<9xcomplex> { } // CHECK-LABEL: "attr_fft_type_irfft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -202,6 +221,7 @@ func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> // GatherDimensionNumbers aka #stablehlo.gather is covered below. // CHECK-LABEL: "attr_precision_config_default" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -210,6 +230,7 @@ func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor< } // CHECK-LABEL: "attr_precision_config_high" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -219,6 +240,7 @@ func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x } // CHECK-LABEL: "attr_precision_config_highest" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -228,6 +250,7 @@ func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor< } // CHECK-LABEL: "attr_rng_algorithm_default" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -237,6 +260,7 @@ func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tenso } // CHECK-LABEL: 
"attr_rng_algorithm_three_fry" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -246,6 +270,7 @@ func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, ten } // CHECK-LABEL: "attr_rng_algorithm_philox" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -255,6 +280,7 @@ func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor } // CHECK-LABEL: "attr_rng_distribution_uniform" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { // CHECK: rng_distribution = #vhlo @@ -264,6 +290,7 @@ func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, } // CHECK-LABEL: "attr_rng_distribution_normal" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { // CHECK: rng_distribution = #vhlo @@ -275,6 +302,7 @@ func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. 
// CHECK-LABEL: "attr_transpose_no_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -287,6 +315,7 @@ func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<1 } // CHECK-LABEL: "attr_transpose_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -299,6 +328,7 @@ func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x1 } // CHECK-LABEL: "attr_transpose_adjoint" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -313,10 +343,9 @@ func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16x // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. 
// CHECK-LABEL: "attr_type_extensions_bounds" -func.func @attr_type_extensions_bounds( - %arg0: tensor>) - -> tensor> { - // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) +func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { + // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () func.return %arg0 : tensor> } @@ -324,8 +353,9 @@ func.func @attr_type_extensions_bounds( // ============ DEFAULTS ============ // CHECK-LABEL: "default_all_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.all_gather_v1"(%arg0) <{ + // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -339,8 +369,9 @@ func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "default_all_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_reduce(%arg0: tensor) -> tensor { - // CHECK: "vhlo.all_reduce_v1"(%arg0) + // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) // CHECK-SAME: <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -362,8 +393,9 @@ func.func @default_all_reduce(%arg0: tensor) -> tensor { } // CHECK-LABEL: "default_all_to_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { - // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ + // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, @@ -380,8 +412,9 @@ func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { 
} // CHECK-LABEL: "default_cholesky" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { - // CHECK: "vhlo.cholesky_v1"(%arg0) <{ + // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ // CHECK-SAME: lower = #vhlo.bool_v1 // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> @@ -389,8 +422,9 @@ func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { } // CHECK-LABEL: "default_collective_permute" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { - // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ + // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> @@ -401,8 +435,9 @@ func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf3 } // CHECK-LABEL: "default_compare" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: compare_type = #vhlo, // CHECK-SAME: comparison_direction = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -413,8 +448,9 @@ func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor } // CHECK-LABEL: "default_convolution" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { - // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.convolution_v1"(%[[ARG0]], 
%[[ARG1]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -442,8 +478,9 @@ func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x2 } // CHECK-LABEL: "default_custom_call" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_custom_call(%arg0: tensor) -> tensor { - // CHECK: "vhlo.custom_call_v1"(%arg0) <{ + // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ // CHECK-SAME: api_version = #vhlo, // CHECK-SAME: backend_config = #vhlo.string_v1<"">, // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, @@ -460,8 +497,9 @@ func.func @default_custom_call(%arg0: tensor) -> tensor { } // CHECK-LABEL: "default_dot_general" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { - // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, @@ -480,8 +518,9 @@ func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf } // CHECK-LABEL: "default_dot" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> @@ -489,8 +528,9 @@ func.func 
@default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tens } // CHECK-LABEL: "default_dynamic_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>> @@ -502,8 +542,9 @@ func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tenso } // CHECK-LABEL: "default_dynamic_conv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<2x2xi32>) -> tensor<1x?x?x16xf32> { - // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -531,8 +572,9 @@ func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x } // CHECK-LABEL: "default_dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { - // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = 
#vhlo.bool_v1, @@ -558,15 +600,16 @@ func.func @default_func(%arg0: tensor) -> tensor { // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : () -> () func.return %arg0 : tensor } // CHECK-LABEL: "dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { - // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -587,8 +630,9 @@ func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) } // CHECK-LABEL: "default_infeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.infeed_v1"(%arg0) <{ + // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ // CHECK-SAME: infeed_config = #vhlo.string_v1<"">, // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) @@ -597,8 +641,9 @@ func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.t } // CHECK-LABEL: "default_outfeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> // CHECK-SAME: }> : 
(!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token @@ -606,8 +651,9 @@ func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stab } // CHECK-LABEL: "default_recv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.recv_v1"(%arg0) <{ + // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -619,8 +665,9 @@ func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.tok } // CHECK-LABEL: "default_send" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -632,8 +679,9 @@ func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stableh } // CHECK-LABEL: "default_reduce_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> @@ -655,8 +703,9 @@ func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "default_reduce_window" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> 
tensor<2x16x30x7xf32> { - // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, @@ -678,8 +727,9 @@ func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { - // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, @@ -707,8 +757,9 @@ func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi3 } // CHECK-LABEL: "default_select_and_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { - // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> @@ -736,8 +787,9 @@ func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: ten } // CHECK-LABEL: "default_sort" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.sort_v1"(%arg0) <{ + // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> // CHECK-SAME: is_stable = #vhlo.bool_v1 // CHECK-SAME: }> ({ @@ -756,29 +808,33 @@ 
func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { // ============ OPS ============ // CHECK-LABEL: "op_abs" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_abs(%arg0: tensor) -> tensor { - // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_add" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_after_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 + // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token func.return %0 : !stablehlo.token } // CHECK-LABEL: "op_all_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.all_gather_v1"(%arg0) <{ + // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -794,8 +850,9 @@ func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_all_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_reduce(%arg0: tensor) -> tensor { - // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ + // CHECK: 
"vhlo.all_reduce_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 @@ -817,8 +874,9 @@ func.func @op_all_reduce(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_all_to_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { - // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ + // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, @@ -836,22 +894,25 @@ func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { } // CHECK-LABEL: "op_and" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_atan2" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_batch_norm_grad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: 
tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { - // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ + // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) @@ -863,8 +924,9 @@ func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf } // CHECK-LABEL: "op_batch_norm_inference" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { - // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ + // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> @@ -876,8 +938,9 @@ func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor } // CHECK-LABEL: "op_batch_norm_training" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_batch_norm_training(%arg0: 
tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { - // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) @@ -889,15 +952,17 @@ func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor< } // CHECK-LABEL: "op_bitcast_convert" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_bitcast_convert(%arg0: tensor) -> tensor { - // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ + // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> %0 = "stablehlo.broadcast_in_dim"(%arg0) { @@ -907,8 +972,9 @@ func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_broadcast" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.broadcast_v1"(%arg0) <{ + // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : 
tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> %0 = "stablehlo.broadcast"(%arg0) { @@ -918,9 +984,10 @@ func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_case" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.case_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.case"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> () @@ -929,22 +996,25 @@ func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_cbrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cbrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_ceil" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_ceil(%arg0: tensor) -> tensor { - // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_cholesky" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { - // CHECK: "vhlo.cholesky_v1"(%arg0) <{ + // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ // CHECK-SAME: lower = #vhlo.bool_v1 // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> %0 = "stablehlo.cholesky"(%arg0) { @@ -954,22 +1024,25 @@ func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { } 
// CHECK-LABEL: "op_clamp" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_count_leading_zeros" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { - // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_collective_permute" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { - // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ + // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> @@ -981,8 +1054,9 @@ func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { } // CHECK-LABEL: "op_compare" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: compare_type = #vhlo, // CHECK-SAME: comparison_direction = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -994,15 +1068,17 @@ 
func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_complex" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { - // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> + // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "op_concatenate" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.concatenate"(%arg0, %arg1) { @@ -1012,6 +1088,7 @@ func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor< } // CHECK-LABEL: "op_constant" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_constant(%arg0: tensor) -> tensor { // CHECK: "vhlo.constant_v1"() <{ // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> @@ -1023,15 +1100,17 @@ func.func @op_constant(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_convert" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_convert(%arg0: tensor) -> tensor { - // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_convolution" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { - // CHECK: 
"vhlo.convolution_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -1065,8 +1144,9 @@ func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16 } // CHECK-LABEL: "op_cosine" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cosine(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor func.return %0 : tensor } @@ -1079,8 +1159,9 @@ func.func @op_create_token() -> !stablehlo.token { } // CHECK-LABEL: "op_cross_replica_sum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ + // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cross-replica-sum"(%arg0) { @@ -1090,8 +1171,9 @@ func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_custom_call" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_custom_call(%arg0: tensor) -> tensor { - // CHECK: "vhlo.custom_call_v1"(%arg0) <{ + // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ // CHECK-SAME: api_version = #vhlo, // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, @@ -1122,15 +1204,17 @@ func.func @op_custom_call(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_divide" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, 
!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dot_general" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { - // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, @@ -1150,8 +1234,9 @@ func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) } // CHECK-LABEL: "op_dot" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.dot"(%arg0, %arg1) { @@ -1161,8 +1246,9 @@ func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x } // CHECK-LABEL: "op_dynamic_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: known_nonexpanding_dimensions 
= #vhlo.tensor_v1 : tensor<1xi64>> @@ -1176,8 +1262,9 @@ func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xi } // CHECK-LABEL: "op_dynamic_conv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<2x2xi32>) -> tensor<1x?x?x16xf32> { - // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -1210,8 +1297,9 @@ func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x1 } // CHECK-LABEL: "op_dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { - // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -1231,8 +1319,9 @@ func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32 } // CHECK-LABEL: "op_dynamic_iota" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ + // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_iota"(%arg0) { @@ -1242,22 +1331,25 @@ func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { } // CHECK-LABEL: 
"op_dynamic_pad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dynamic_reshape" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dynamic_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { - // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> 
%0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { @@ -1267,15 +1359,17 @@ func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor } // CHECK-LABEL: "op_dynamic_update_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> + // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> func.return %0 : tensor<16xf32> } // CHECK-LABEL: "op_einsum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.einsum"(%arg0, %arg1) { @@ -1285,22 +1379,25 @@ func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor } // CHECK-LABEL: "op_exponential_minus_one" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { - // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: 
"op_exponential" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_exponential(%arg0: tensor) -> tensor { - // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_fft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - // CHECK: "vhlo.fft_v1"(%arg0) <{ + // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: fft_type = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> @@ -1312,8 +1409,9 @@ func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { } // CHECK-LABEL: "op_floor" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_floor(%arg0: tensor) -> tensor { - // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor func.return %0 : tensor } @@ -1326,16 +1424,17 @@ func.func private @op_func(%arg0: tensor {stablehlo.arg = "0"}) -> (tensor< // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : () -> () func.return %arg0 : tensor } // CHECK-LABEL: "op_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { - // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ + // CHECK: 
"vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -1357,8 +1456,9 @@ func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> te } // CHECK-LABEL: "op_get_dimension_size" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_get_dimension_size(%arg0: tensor) -> tensor { - // CHECK: "vhlo.get_dimension_size_v1"(%arg0) <{ + // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.get_dimension_size"(%arg0) { @@ -1368,8 +1468,9 @@ func.func @op_get_dimension_size(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_get_tuple_element" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { - // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ + // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.get_tuple_element"(%arg0) { @@ -1379,11 +1480,12 @@ func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tenso } // CHECK-LABEL: "op_if" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.if_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }, { - // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.if"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> 
() @@ -1394,15 +1496,17 @@ func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> t } // CHECK-LABEL: "op_imag" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_imag(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_infeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.infeed_v1"(%arg0) <{ + // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) @@ -1425,36 +1529,41 @@ func.func @op_iota() -> tensor<16xf32> { } // CHECK-LABEL: "op_is_finite" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_is_finite(%arg0: tensor) -> tensor { - // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_log" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_log(%arg0: tensor) -> tensor { - // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_log_plus_one" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_log_plus_one(%arg0: tensor) -> tensor { - // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor func.return %0 : tensor 
} // CHECK-LABEL: "op_logistic" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_logistic(%arg0: tensor) -> tensor { - // CHECK: "vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_map" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.map_v1"(%arg0) <{ + // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> ({ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): @@ -1472,57 +1581,65 @@ func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_maximum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_minimum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_multiply" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: 
"vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_negate" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_negate(%arg0: tensor) -> tensor { - // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_not" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_not(%arg0: tensor) -> tensor { - // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_optimization_barrier" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_optimization_barrier(%arg0: tensor) -> tensor { - // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_or" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_outfeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ // 
CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.outfeed"(%arg0, %arg1) { @@ -1532,8 +1649,9 @@ func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo } // CHECK-LABEL: "op_pad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> @@ -1547,36 +1665,41 @@ func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { } // CHECK-LABEL: "op_popcnt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_popcnt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_power" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_real_dynamic_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, 
!vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_real" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_real(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_recv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.recv_v1"(%arg0) <{ + // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<3 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -1589,8 +1712,9 @@ func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { } // CHECK-LABEL: "op_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { - // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) + // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -1605,8 +1729,9 @@ func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_reduce_precision" +// CHECK-NEXT: (%[[ARG0:.*]]: 
{{.*}}) func.func @op_reduce_precision(%arg0: tensor) -> tensor { - // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -1618,8 +1743,9 @@ func.func @op_reduce_precision(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_reduce_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> @@ -1643,8 +1769,9 @@ func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_reduce_window" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { - // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, @@ -1670,8 +1797,9 @@ func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> } // CHECK-LABEL: "op_remainder" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) 
-> tensor func.return %0 : tensor } @@ -1691,16 +1819,18 @@ func.func @op_partition_id() -> tensor { } // CHECK-LABEL: "op_reshape" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { - // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> + // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> func.return %0 : tensor<4x4xf32> } // CHECK-LABEL: "op_return" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.case_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.case"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> () @@ -1709,8 +1839,9 @@ func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_reverse" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reverse_v1"(%arg0) <{ + // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.reverse"(%arg0) { @@ -1720,8 +1851,9 @@ func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_rng_bit_generator" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { - // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ + // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ // CHECK-SAME: rng_algorithm = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, 
!vhlo.tensor_v1) %0:2 = "stablehlo.rng_bit_generator"(%arg0) { @@ -1731,8 +1863,9 @@ func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor } // CHECK-LABEL: "op_rng" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: rng_distribution = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { @@ -1742,29 +1875,33 @@ func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex> } // CHECK-LABEL: "op_round_nearest_afz" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { - // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_round_nearest_even" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_round_nearest_even(%arg0: tensor) -> tensor { - // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_rsqrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_rsqrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, 
%[[ARG2:.*]]: {{.*}}) func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { - // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, @@ -1794,8 +1931,9 @@ func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, % } // CHECK-LABEL: "op_select_and_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { - // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> @@ -1825,15 +1963,17 @@ func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<1 } // CHECK-LABEL: "op_select" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_send" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - 
// CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -1846,8 +1986,9 @@ func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.to } // CHECK-LABEL: "op_set_dimension_size" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { @@ -1857,43 +1998,49 @@ func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> te } // CHECK-LABEL: "op_shift_left" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_shift_right_arithmetic" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_shift_right_logical" +// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_sign" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sign(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_sine" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sine(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { - // CHECK: "vhlo.slice_v1"(%arg0) <{ + // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> @@ -1907,8 +2054,9 @@ func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { } // CHECK-LABEL: "op_sort" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.sort_v1"(%arg0) <{ + // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: is_stable = #vhlo.bool_v1 // CHECK-SAME: }> ({ @@ -1928,29 +2076,33 @@ func.func @op_sort(%arg0: 
tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_sqrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sqrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_subtract" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_tanh" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_tanh(%arg0: tensor) -> tensor { - // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_torch_index_select" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { - // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> @@ -1962,8 +2114,9 @@ func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) } // CHECK-LABEL: "op_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { - // CHECK: 
"vhlo.transpose_v1"(%arg0) <{ + // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> %0 = "stablehlo.transpose"(%arg0) { @@ -1973,8 +2126,9 @@ func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { } // CHECK-LABEL: "op_triangular_solve" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: left_side = #vhlo.bool_v1, // CHECK-SAME: lower = #vhlo.bool_v1, // CHECK-SAME: transpose_a = #vhlo, @@ -1990,15 +2144,17 @@ func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32 } // CHECK-LABEL: "op_tuple" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_tuple(%arg0: tensor) -> tuple> { - // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> + // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> func.return %0 : tuple> } // CHECK-LABEL: "op_unary_einsum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { - // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ + // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> %0 = "stablehlo.unary_einsum"(%arg0) { @@ -2008,22 +2164,25 @@ func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { } // CHECK-LABEL: "op_uniform_dequantize" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> 
!vhlo.tensor_v1 + // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_uniform_quantize" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { - // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> + // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "op_while" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_while(%arg0: tensor) -> tensor { - // CHECK: "vhlo.while_v1"(%arg0) ({ + // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }, { @@ -2041,8 +2200,9 @@ func.func @op_while(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_xor" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } @@ -2050,190 +2210,217 @@ func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { // ============ TYPES ============ // CHECK-LABEL: "type_i1" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return 
%0 : tensor } // CHECK-LABEL: "type_i4" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i8" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> 
!vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui4" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui8" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: 
"type_ui64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3FN" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E5M2" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3B11FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3B11FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : 
(!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E5M2FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_bf16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor 
func.return %0 : tensor } // CHECK-LABEL: "type_f64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_complex_f32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "type_complex_f64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "type_dynamism_ranked" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { - // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_quantization" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, 
!vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> // CHECK-LABEL: "type_token_callee" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () + // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () return %arg0 : !stablehlo.token } // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> // CHECK-LABEL: "type_token_caller" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} + // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token return %0 : !stablehlo.token } // CHECK-LABEL: "type_tuple" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_tuple(%arg0: tuple>) -> tuple { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo" diff --git a/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_15_0.mlir b/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_15_0.mlir index 9ffe0c5852a..3241cdfaebb 100644 --- a/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_15_0.mlir +++ b/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_15_0.mlir @@ -13,6 +13,7 @@ // ============ ATTRIBUTES ============ // CHECK-LABEL: "attr_comparison_direction_eq" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -22,6 +23,7 @@ func.func 
@attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_ne" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -31,6 +33,7 @@ func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_ge" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -40,6 +43,7 @@ func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_gt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -49,6 +53,7 @@ func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_le" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -58,6 +63,7 @@ func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_lt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -67,6 +73,7 @@ func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_type_notype" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func 
@attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo @@ -76,6 +83,7 @@ func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) - } // CHECK-LABEL: "attr_comparison_type_float" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -86,6 +94,7 @@ func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> } // CHECK-LABEL: "attr_comparison_type_totalorder" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -96,6 +105,7 @@ func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -106,6 +116,7 @@ func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) - } // CHECK-LABEL: "attr_comparison_type_unsigned" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -118,6 +129,7 @@ func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) // ConvDimensionNumbers aka #stablehlo.conv is covered below. 
// CHECK-LABEL: "attr_custom_call_api_version_unspecified" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -128,6 +140,7 @@ func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tenso } // CHECK-LABEL: "attr_custom_call_api_version_original" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -138,6 +151,7 @@ func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -148,6 +162,7 @@ func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> } // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -166,6 +181,7 @@ func.func @attr_dict() attributes {stablehlo.attr = {attr1 = 1 : i32, attr2 = 2 // DotDimensionNumbers aka #stablehlo.dot is covered below. 
// CHECK-LABEL: "attr_fft_type_fft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -176,6 +192,7 @@ func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomple } // CHECK-LABEL: "attr_fft_type_ifft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -186,6 +203,7 @@ func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcompl } // CHECK-LABEL: "attr_fft_type_rfft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -196,6 +214,7 @@ func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { } // CHECK-LABEL: "attr_fft_type_irfft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -208,6 +227,7 @@ func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> // GatherDimensionNumbers aka #stablehlo.gather is covered below. 
// CHECK-LABEL: "attr_precision_config_default" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -216,6 +236,7 @@ func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor< } // CHECK-LABEL: "attr_precision_config_high" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -225,6 +246,7 @@ func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x } // CHECK-LABEL: "attr_precision_config_highest" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -234,6 +256,7 @@ func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor< } // CHECK-LABEL: "attr_rng_algorithm_default" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -243,6 +266,7 @@ func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tenso } // CHECK-LABEL: "attr_rng_algorithm_three_fry" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -252,6 +276,7 @@ func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, ten } // CHECK-LABEL: "attr_rng_algorithm_philox" +// CHECK-NEXT: (%[[ARG0:.*]]: 
{{.*}}) func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -261,6 +286,7 @@ func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor } // CHECK-LABEL: "attr_rng_distribution_uniform" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { // CHECK: rng_distribution = #vhlo @@ -270,6 +296,7 @@ func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, } // CHECK-LABEL: "attr_rng_distribution_normal" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { // CHECK: rng_distribution = #vhlo @@ -281,6 +308,7 @@ func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. 
// CHECK-LABEL: "attr_transpose_no_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -293,6 +321,7 @@ func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<1 } // CHECK-LABEL: "attr_transpose_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -305,6 +334,7 @@ func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x1 } // CHECK-LABEL: "attr_transpose_adjoint" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -319,10 +349,9 @@ func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16x // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. 
// CHECK-LABEL: "attr_type_extensions_bounds" -func.func @attr_type_extensions_bounds( - %arg0: tensor>) - -> tensor> { - // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) +func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { + // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () func.return %arg0 : tensor> } @@ -330,8 +359,9 @@ func.func @attr_type_extensions_bounds( // ============ DEFAULTS ============ // CHECK-LABEL: "default_all_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.all_gather_v1"(%arg0) <{ + // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -345,8 +375,9 @@ func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "default_all_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_reduce(%arg0: tensor) -> tensor { - // CHECK: "vhlo.all_reduce_v1"(%arg0) + // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) // CHECK-SAME: <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -368,8 +399,9 @@ func.func @default_all_reduce(%arg0: tensor) -> tensor { } // CHECK-LABEL: "default_all_to_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { - // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ + // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, @@ -386,8 +418,9 @@ func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { 
} // CHECK-LABEL: "default_cholesky" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { - // CHECK: "vhlo.cholesky_v1"(%arg0) <{ + // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ // CHECK-SAME: lower = #vhlo.bool_v1 // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> @@ -395,8 +428,9 @@ func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { } // CHECK-LABEL: "default_collective_permute" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { - // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ + // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> @@ -407,8 +441,9 @@ func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf3 } // CHECK-LABEL: "default_compare" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: compare_type = #vhlo, // CHECK-SAME: comparison_direction = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -419,8 +454,9 @@ func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor } // CHECK-LABEL: "default_convolution" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { - // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.convolution_v1"(%[[ARG0]], 
%[[ARG1]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -448,8 +484,9 @@ func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x2 } // CHECK-LABEL: "default_custom_call" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_custom_call(%arg0: tensor) -> tensor { - // CHECK: "vhlo.custom_call_v1"(%arg0) <{ + // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ // CHECK-SAME: api_version = #vhlo, // CHECK-SAME: backend_config = #vhlo.string_v1<"">, // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, @@ -466,8 +503,9 @@ func.func @default_custom_call(%arg0: tensor) -> tensor { } // CHECK-LABEL: "default_dot_general" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { - // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, @@ -486,8 +524,9 @@ func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf } // CHECK-LABEL: "default_dot" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> @@ -495,8 +534,9 @@ func.func 
@default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tens } // CHECK-LABEL: "default_dynamic_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>> @@ -508,8 +548,9 @@ func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tenso } // CHECK-LABEL: "default_dynamic_conv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<2x2xi32>) -> tensor<1x?x?x16xf32> { - // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -537,8 +578,9 @@ func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x } // CHECK-LABEL: "default_dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { - // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = 
#vhlo.bool_v1, @@ -564,15 +606,16 @@ func.func @default_func(%arg0: tensor) -> tensor { // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : () -> () func.return %arg0 : tensor } // CHECK-LABEL: "dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { - // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -593,8 +636,9 @@ func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) } // CHECK-LABEL: "default_infeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.infeed_v1"(%arg0) <{ + // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ // CHECK-SAME: infeed_config = #vhlo.string_v1<"">, // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) @@ -603,8 +647,9 @@ func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.t } // CHECK-LABEL: "default_outfeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> // CHECK-SAME: }> : 
(!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token @@ -612,8 +657,9 @@ func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stab } // CHECK-LABEL: "default_recv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.recv_v1"(%arg0) <{ + // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -625,8 +671,9 @@ func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.tok } // CHECK-LABEL: "default_send" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -638,8 +685,9 @@ func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stableh } // CHECK-LABEL: "default_reduce_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> @@ -661,8 +709,9 @@ func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "default_reduce_window" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> 
tensor<2x16x30x7xf32> { - // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, @@ -684,8 +733,9 @@ func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { - // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, @@ -713,8 +763,9 @@ func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi3 } // CHECK-LABEL: "default_select_and_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { - // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> @@ -742,8 +793,9 @@ func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: ten } // CHECK-LABEL: "default_sort" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.sort_v1"(%arg0) <{ + // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> // CHECK-SAME: is_stable = #vhlo.bool_v1 // CHECK-SAME: }> ({ @@ -762,29 +814,33 @@ 
func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { // ============ OPS ============ // CHECK-LABEL: "op_abs" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_abs(%arg0: tensor) -> tensor { - // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_add" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_after_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 + // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token func.return %0 : !stablehlo.token } // CHECK-LABEL: "op_all_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.all_gather_v1"(%arg0) <{ + // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -800,8 +856,9 @@ func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_all_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_reduce(%arg0: tensor) -> tensor { - // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ + // CHECK: 
"vhlo.all_reduce_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 @@ -823,8 +880,9 @@ func.func @op_all_reduce(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_all_to_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { - // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ + // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, @@ -842,22 +900,25 @@ func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { } // CHECK-LABEL: "op_and" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_atan2" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_batch_norm_grad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: 
tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { - // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ + // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) @@ -869,8 +930,9 @@ func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf } // CHECK-LABEL: "op_batch_norm_inference" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { - // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ + // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> @@ -882,8 +944,9 @@ func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor } // CHECK-LABEL: "op_batch_norm_training" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_batch_norm_training(%arg0: 
tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { - // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) @@ -895,15 +958,17 @@ func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor< } // CHECK-LABEL: "op_bitcast_convert" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_bitcast_convert(%arg0: tensor) -> tensor { - // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ + // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> %0 = "stablehlo.broadcast_in_dim"(%arg0) { @@ -913,8 +978,9 @@ func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_broadcast" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.broadcast_v1"(%arg0) <{ + // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : 
tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> %0 = "stablehlo.broadcast"(%arg0) { @@ -924,9 +990,10 @@ func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_case" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.case_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.case"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> () @@ -935,22 +1002,25 @@ func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_cbrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cbrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_ceil" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_ceil(%arg0: tensor) -> tensor { - // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_cholesky" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { - // CHECK: "vhlo.cholesky_v1"(%arg0) <{ + // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ // CHECK-SAME: lower = #vhlo.bool_v1 // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> %0 = "stablehlo.cholesky"(%arg0) { @@ -960,22 +1030,25 @@ func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { } 
// CHECK-LABEL: "op_clamp" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_count_leading_zeros" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { - // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_collective_permute" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { - // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ + // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> @@ -987,8 +1060,9 @@ func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { } // CHECK-LABEL: "op_compare" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: compare_type = #vhlo, // CHECK-SAME: comparison_direction = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -1000,15 +1074,17 @@ 
func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_complex" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { - // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> + // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "op_concatenate" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.concatenate"(%arg0, %arg1) { @@ -1018,6 +1094,7 @@ func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor< } // CHECK-LABEL: "op_constant" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_constant(%arg0: tensor) -> tensor { // CHECK: "vhlo.constant_v1"() <{ // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> @@ -1029,15 +1106,17 @@ func.func @op_constant(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_convert" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_convert(%arg0: tensor) -> tensor { - // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_convolution" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { - // CHECK: 
"vhlo.convolution_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -1071,8 +1150,9 @@ func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16 } // CHECK-LABEL: "op_cosine" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cosine(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor func.return %0 : tensor } @@ -1085,8 +1165,9 @@ func.func @op_create_token() -> !stablehlo.token { } // CHECK-LABEL: "op_cross_replica_sum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ + // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cross-replica-sum"(%arg0) { @@ -1096,8 +1177,9 @@ func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_custom_call" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_custom_call(%arg0: tensor) -> tensor { - // CHECK: "vhlo.custom_call_v1"(%arg0) <{ + // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ // CHECK-SAME: api_version = #vhlo, // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, @@ -1128,15 +1210,17 @@ func.func @op_custom_call(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_divide" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, 
!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dot_general" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { - // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, @@ -1156,8 +1240,9 @@ func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) } // CHECK-LABEL: "op_dot" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.dot"(%arg0, %arg1) { @@ -1167,8 +1252,9 @@ func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x } // CHECK-LABEL: "op_dynamic_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: known_nonexpanding_dimensions 
= #vhlo.tensor_v1 : tensor<1xi64>> @@ -1182,8 +1268,9 @@ func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xi } // CHECK-LABEL: "op_dynamic_conv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<2x2xi32>) -> tensor<1x?x?x16xf32> { - // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -1216,8 +1303,9 @@ func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x1 } // CHECK-LABEL: "op_dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { - // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -1237,8 +1325,9 @@ func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32 } // CHECK-LABEL: "op_dynamic_iota" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ + // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_iota"(%arg0) { @@ -1248,22 +1337,25 @@ func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { } // CHECK-LABEL: 
"op_dynamic_pad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dynamic_reshape" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dynamic_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { - // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> 
%0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { @@ -1273,15 +1365,17 @@ func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor } // CHECK-LABEL: "op_dynamic_update_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> + // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> func.return %0 : tensor<16xf32> } // CHECK-LABEL: "op_einsum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.einsum"(%arg0, %arg1) { @@ -1291,22 +1385,25 @@ func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor } // CHECK-LABEL: "op_exponential_minus_one" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { - // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: 
"op_exponential" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_exponential(%arg0: tensor) -> tensor { - // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_fft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - // CHECK: "vhlo.fft_v1"(%arg0) <{ + // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: fft_type = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> @@ -1318,8 +1415,9 @@ func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { } // CHECK-LABEL: "op_floor" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_floor(%arg0: tensor) -> tensor { - // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor func.return %0 : tensor } @@ -1332,16 +1430,17 @@ func.func private @op_func(%arg0: tensor {stablehlo.arg = "0"}) -> (tensor< // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : () -> () func.return %arg0 : tensor } // CHECK-LABEL: "op_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { - // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ + // CHECK: 
"vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -1363,8 +1462,9 @@ func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> te } // CHECK-LABEL: "op_get_dimension_size" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_get_dimension_size(%arg0: tensor) -> tensor { - // CHECK: "vhlo.get_dimension_size_v1"(%arg0) <{ + // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.get_dimension_size"(%arg0) { @@ -1374,8 +1474,9 @@ func.func @op_get_dimension_size(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_get_tuple_element" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { - // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ + // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.get_tuple_element"(%arg0) { @@ -1385,11 +1486,12 @@ func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tenso } // CHECK-LABEL: "op_if" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.if_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }, { - // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.if"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> 
() @@ -1400,15 +1502,17 @@ func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> t } // CHECK-LABEL: "op_imag" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_imag(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_infeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.infeed_v1"(%arg0) <{ + // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) @@ -1431,36 +1535,41 @@ func.func @op_iota() -> tensor<16xf32> { } // CHECK-LABEL: "op_is_finite" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_is_finite(%arg0: tensor) -> tensor { - // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_log" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_log(%arg0: tensor) -> tensor { - // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_log_plus_one" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_log_plus_one(%arg0: tensor) -> tensor { - // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor func.return %0 : tensor 
} // CHECK-LABEL: "op_logistic" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_logistic(%arg0: tensor) -> tensor { - // CHECK: "vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_map" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.map_v1"(%arg0) <{ + // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> ({ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): @@ -1478,57 +1587,65 @@ func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_maximum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_minimum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_multiply" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: 
"vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_negate" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_negate(%arg0: tensor) -> tensor { - // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_not" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_not(%arg0: tensor) -> tensor { - // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_optimization_barrier" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_optimization_barrier(%arg0: tensor) -> tensor { - // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_or" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_outfeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ // 
CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.outfeed"(%arg0, %arg1) { @@ -1538,8 +1655,9 @@ func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo } // CHECK-LABEL: "op_pad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> @@ -1553,36 +1671,41 @@ func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { } // CHECK-LABEL: "op_popcnt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_popcnt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_power" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_real_dynamic_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, 
!vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_real" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_real(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_recv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.recv_v1"(%arg0) <{ + // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<3 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -1595,8 +1718,9 @@ func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { } // CHECK-LABEL: "op_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { - // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) + // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -1611,8 +1735,9 @@ func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_reduce_precision" +// CHECK-NEXT: (%[[ARG0:.*]]: 
{{.*}}) func.func @op_reduce_precision(%arg0: tensor) -> tensor { - // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -1624,8 +1749,9 @@ func.func @op_reduce_precision(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_reduce_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> @@ -1649,8 +1775,9 @@ func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_reduce_window" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { - // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, @@ -1676,8 +1803,9 @@ func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> } // CHECK-LABEL: "op_remainder" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) 
-> tensor func.return %0 : tensor } @@ -1697,16 +1825,18 @@ func.func @op_partition_id() -> tensor { } // CHECK-LABEL: "op_reshape" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { - // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> + // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> func.return %0 : tensor<4x4xf32> } // CHECK-LABEL: "op_return" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.case_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.case"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> () @@ -1715,8 +1845,9 @@ func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_reverse" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reverse_v1"(%arg0) <{ + // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.reverse"(%arg0) { @@ -1726,8 +1857,9 @@ func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_rng_bit_generator" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { - // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ + // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ // CHECK-SAME: rng_algorithm = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, 
!vhlo.tensor_v1) %0:2 = "stablehlo.rng_bit_generator"(%arg0) { @@ -1737,8 +1869,9 @@ func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor } // CHECK-LABEL: "op_rng" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: rng_distribution = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { @@ -1748,29 +1881,33 @@ func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex> } // CHECK-LABEL: "op_round_nearest_afz" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { - // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_round_nearest_even" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_round_nearest_even(%arg0: tensor) -> tensor { - // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_rsqrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_rsqrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, 
%[[ARG2:.*]]: {{.*}}) func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { - // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, @@ -1800,8 +1937,9 @@ func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, % } // CHECK-LABEL: "op_select_and_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { - // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> @@ -1831,15 +1969,17 @@ func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<1 } // CHECK-LABEL: "op_select" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_send" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - 
// CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -1852,8 +1992,9 @@ func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.to } // CHECK-LABEL: "op_set_dimension_size" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { @@ -1863,43 +2004,49 @@ func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> te } // CHECK-LABEL: "op_shift_left" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_shift_right_arithmetic" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_shift_right_logical" +// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_sign" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sign(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_sine" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sine(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { - // CHECK: "vhlo.slice_v1"(%arg0) <{ + // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> @@ -1913,8 +2060,9 @@ func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { } // CHECK-LABEL: "op_sort" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.sort_v1"(%arg0) <{ + // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: is_stable = #vhlo.bool_v1 // CHECK-SAME: }> ({ @@ -1934,29 +2082,33 @@ func.func @op_sort(%arg0: 
tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_sqrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sqrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_subtract" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_tanh" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_tanh(%arg0: tensor) -> tensor { - // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_torch_index_select" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { - // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> @@ -1968,8 +2120,9 @@ func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) } // CHECK-LABEL: "op_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { - // CHECK: 
"vhlo.transpose_v1"(%arg0) <{ + // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> %0 = "stablehlo.transpose"(%arg0) { @@ -1979,8 +2132,9 @@ func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { } // CHECK-LABEL: "op_triangular_solve" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: left_side = #vhlo.bool_v1, // CHECK-SAME: lower = #vhlo.bool_v1, // CHECK-SAME: transpose_a = #vhlo, @@ -1996,15 +2150,17 @@ func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32 } // CHECK-LABEL: "op_tuple" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_tuple(%arg0: tensor) -> tuple> { - // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> + // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> func.return %0 : tuple> } // CHECK-LABEL: "op_unary_einsum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { - // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ + // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> %0 = "stablehlo.unary_einsum"(%arg0) { @@ -2014,22 +2170,25 @@ func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { } // CHECK-LABEL: "op_uniform_dequantize" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> 
!vhlo.tensor_v1 + // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_uniform_quantize" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { - // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> + // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "op_while" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_while(%arg0: tensor) -> tensor { - // CHECK: "vhlo.while_v1"(%arg0) ({ + // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }, { @@ -2047,8 +2206,9 @@ func.func @op_while(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_xor" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } @@ -2056,190 +2216,217 @@ func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { // ============ TYPES ============ // CHECK-LABEL: "type_i1" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return 
%0 : tensor } // CHECK-LABEL: "type_i4" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i8" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> 
!vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui4" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui8" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: 
"type_ui64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3FN" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E5M2" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3B11FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3B11FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : 
(!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E5M2FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_bf16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor 
func.return %0 : tensor } // CHECK-LABEL: "type_f64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_complex_f32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "type_complex_f64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "type_dynamism_ranked" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { - // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_quantization" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, 
!vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> // CHECK-LABEL: "type_token_callee" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () + // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () return %arg0 : !stablehlo.token } // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> // CHECK-LABEL: "type_token_caller" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} + // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token return %0 : !stablehlo.token } // CHECK-LABEL: "type_tuple" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_tuple(%arg0: tuple>) -> tuple { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo" diff --git a/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_16_0.mlir b/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_16_0.mlir index d6bd6cba798..ea9cc6e7855 100644 --- a/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_16_0.mlir +++ b/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_16_0.mlir @@ -13,6 +13,7 @@ // ============ ATTRIBUTES ============ // CHECK-LABEL: "attr_comparison_direction_eq" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -22,6 +23,7 @@ func.func 
@attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_ne" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -31,6 +33,7 @@ func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_ge" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -40,6 +43,7 @@ func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_gt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -49,6 +53,7 @@ func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_le" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -58,6 +63,7 @@ func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_lt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -67,6 +73,7 @@ func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_type_notype" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func 
@attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo @@ -76,6 +83,7 @@ func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) - } // CHECK-LABEL: "attr_comparison_type_float" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -86,6 +94,7 @@ func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> } // CHECK-LABEL: "attr_comparison_type_totalorder" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -96,6 +105,7 @@ func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -106,6 +116,7 @@ func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) - } // CHECK-LABEL: "attr_comparison_type_unsigned" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -118,6 +129,7 @@ func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) // ConvDimensionNumbers aka #stablehlo.conv is covered below. 
// CHECK-LABEL: "attr_custom_call_api_version_unspecified" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -128,6 +140,7 @@ func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tenso } // CHECK-LABEL: "attr_custom_call_api_version_original" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -138,6 +151,7 @@ func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -148,6 +162,7 @@ func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> } // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -166,6 +181,7 @@ func.func @attr_dict() attributes {stablehlo.attr = {attr1 = 1 : i32, attr2 = 2 // DotDimensionNumbers aka #stablehlo.dot is covered below. 
// CHECK-LABEL: "attr_fft_type_fft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -176,6 +192,7 @@ func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomple } // CHECK-LABEL: "attr_fft_type_ifft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -186,6 +203,7 @@ func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcompl } // CHECK-LABEL: "attr_fft_type_rfft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -196,6 +214,7 @@ func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { } // CHECK-LABEL: "attr_fft_type_irfft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -208,6 +227,7 @@ func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> // GatherDimensionNumbers aka #stablehlo.gather is covered below. 
// CHECK-LABEL: "attr_precision_config_default" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -216,6 +236,7 @@ func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor< } // CHECK-LABEL: "attr_precision_config_high" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -225,6 +246,7 @@ func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x } // CHECK-LABEL: "attr_precision_config_highest" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -234,6 +256,7 @@ func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor< } // CHECK-LABEL: "attr_rng_algorithm_default" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -243,6 +266,7 @@ func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tenso } // CHECK-LABEL: "attr_rng_algorithm_three_fry" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -252,6 +276,7 @@ func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, ten } // CHECK-LABEL: "attr_rng_algorithm_philox" +// CHECK-NEXT: (%[[ARG0:.*]]: 
{{.*}}) func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -261,6 +286,7 @@ func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor } // CHECK-LABEL: "attr_rng_distribution_uniform" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { // CHECK: rng_distribution = #vhlo @@ -270,6 +296,7 @@ func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, } // CHECK-LABEL: "attr_rng_distribution_normal" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { // CHECK: rng_distribution = #vhlo @@ -281,6 +308,7 @@ func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. 
// CHECK-LABEL: "attr_transpose_no_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -293,6 +321,7 @@ func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<1 } // CHECK-LABEL: "attr_transpose_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -305,6 +334,7 @@ func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x1 } // CHECK-LABEL: "attr_transpose_adjoint" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -319,10 +349,9 @@ func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16x // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. 
// CHECK-LABEL: "attr_type_extensions_bounds" -func.func @attr_type_extensions_bounds( - %arg0: tensor>) - -> tensor> { - // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) +func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { + // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () func.return %arg0 : tensor> } @@ -330,8 +359,9 @@ func.func @attr_type_extensions_bounds( // ============ DEFAULTS ============ // CHECK-LABEL: "default_all_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.all_gather_v1"(%arg0) <{ + // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -345,8 +375,9 @@ func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "default_all_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_reduce(%arg0: tensor) -> tensor { - // CHECK: "vhlo.all_reduce_v1"(%arg0) + // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) // CHECK-SAME: <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -368,8 +399,9 @@ func.func @default_all_reduce(%arg0: tensor) -> tensor { } // CHECK-LABEL: "default_all_to_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { - // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ + // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, @@ -386,8 +418,9 @@ func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { 
} // CHECK-LABEL: "default_cholesky" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { - // CHECK: "vhlo.cholesky_v1"(%arg0) <{ + // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ // CHECK-SAME: lower = #vhlo.bool_v1 // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> @@ -395,8 +428,9 @@ func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { } // CHECK-LABEL: "default_collective_permute" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { - // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ + // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> @@ -407,8 +441,9 @@ func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf3 } // CHECK-LABEL: "default_collective_broadcast" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_collective_broadcast(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { - // CHECK: "vhlo.collective_broadcast_v1"(%arg0) <{ + // CHECK: "vhlo.collective_broadcast_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> @@ -419,8 +454,9 @@ func.func @default_collective_broadcast(%arg0: tensor<16x8xf32>) -> tensor<16x8x } // CHECK-LABEL: "default_compare" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ + 
// CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: compare_type = #vhlo, // CHECK-SAME: comparison_direction = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -431,8 +467,9 @@ func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor } // CHECK-LABEL: "default_convolution" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { - // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -460,8 +497,9 @@ func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x2 } // CHECK-LABEL: "default_custom_call" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_custom_call(%arg0: tensor) -> tensor { - // CHECK: "vhlo.custom_call_v1"(%arg0) <{ + // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ // CHECK-SAME: api_version = #vhlo, // CHECK-SAME: backend_config = #vhlo.string_v1<"">, // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, @@ -478,8 +516,9 @@ func.func @default_custom_call(%arg0: tensor) -> tensor { } // CHECK-LABEL: "default_dot_general" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { - // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, @@ -498,8 +537,9 @@ func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: 
tensor<8x16x8xf } // CHECK-LABEL: "default_dot" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> @@ -507,8 +547,9 @@ func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tens } // CHECK-LABEL: "default_dynamic_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>> @@ -520,8 +561,9 @@ func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tenso } // CHECK-LABEL: "default_dynamic_conv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<2x2xi32>) -> tensor<1x?x?x16xf32> { - // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -549,8 +591,9 @@ func.func 
@default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x } // CHECK-LABEL: "default_dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { - // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -576,15 +619,16 @@ func.func @default_func(%arg0: tensor) -> tensor { // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : () -> () func.return %arg0 : tensor } // CHECK-LABEL: "dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { - // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -605,8 +649,9 @@ func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) } // CHECK-LABEL: "default_infeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.infeed_v1"(%arg0) <{ + // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ // 
CHECK-SAME: infeed_config = #vhlo.string_v1<"">, // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) @@ -615,8 +660,9 @@ func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.t } // CHECK-LABEL: "default_outfeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token @@ -624,8 +670,9 @@ func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stab } // CHECK-LABEL: "default_recv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.recv_v1"(%arg0) <{ + // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -637,8 +684,9 @@ func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.tok } // CHECK-LABEL: "default_send" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -650,8 +698,9 @@ func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stableh } // CHECK-LABEL: "default_reduce_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func 
@default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> @@ -673,8 +722,9 @@ func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "default_reduce_window" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x16x30x7xf32> { - // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, @@ -696,8 +746,9 @@ func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { - // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, @@ -725,8 +776,9 @@ func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi3 } // CHECK-LABEL: "default_select_and_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { - // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: 
padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> @@ -754,8 +806,9 @@ func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: ten } // CHECK-LABEL: "default_sort" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.sort_v1"(%arg0) <{ + // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> // CHECK-SAME: is_stable = #vhlo.bool_v1 // CHECK-SAME: }> ({ @@ -774,29 +827,33 @@ func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { // ============ OPS ============ // CHECK-LABEL: "op_abs" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_abs(%arg0: tensor) -> tensor { - // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_add" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_after_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 + // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token func.return %0 : !stablehlo.token } // CHECK-LABEL: "op_all_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func 
@op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.all_gather_v1"(%arg0) <{ + // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -812,8 +869,9 @@ func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_all_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_reduce(%arg0: tensor) -> tensor { - // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ + // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 @@ -835,8 +893,9 @@ func.func @op_all_reduce(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_all_to_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { - // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ + // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, @@ -854,22 +913,25 @@ func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { } // CHECK-LABEL: "op_and" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_atan2" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_atan2(%arg0: tensor, %arg1: 
tensor) -> tensor { - // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_batch_norm_grad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { - // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ + // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) @@ -881,8 +943,9 @@ func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf } // CHECK-LABEL: "op_batch_norm_inference" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { - // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ + // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ // CHECK-SAME: epsilon = 
#vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> @@ -894,8 +957,9 @@ func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor } // CHECK-LABEL: "op_batch_norm_training" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { - // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) @@ -907,15 +971,17 @@ func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor< } // CHECK-LABEL: "op_bitcast_convert" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_bitcast_convert(%arg0: tensor) -> tensor { - // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ + // CHECK: 
"vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> %0 = "stablehlo.broadcast_in_dim"(%arg0) { @@ -925,8 +991,9 @@ func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_broadcast" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.broadcast_v1"(%arg0) <{ + // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> %0 = "stablehlo.broadcast"(%arg0) { @@ -936,9 +1003,10 @@ func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_case" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.case_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.case"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> () @@ -947,22 +1015,25 @@ func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_cbrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cbrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_ceil" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_ceil(%arg0: tensor) -> tensor { - // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: 
"vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_cholesky" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { - // CHECK: "vhlo.cholesky_v1"(%arg0) <{ + // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ // CHECK-SAME: lower = #vhlo.bool_v1 // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> %0 = "stablehlo.cholesky"(%arg0) { @@ -972,22 +1043,25 @@ func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { } // CHECK-LABEL: "op_clamp" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_count_leading_zeros" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { - // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_collective_permute" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { - // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ + // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : 
tensor<3x2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> @@ -999,8 +1073,9 @@ func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { } // CHECK-LABEL: "op_compare" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: compare_type = #vhlo, // CHECK-SAME: comparison_direction = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -1012,15 +1087,17 @@ func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_complex" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { - // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> + // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "op_concatenate" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.concatenate"(%arg0, %arg1) { @@ -1030,6 +1107,7 @@ func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor< } // CHECK-LABEL: "op_constant" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_constant(%arg0: tensor) -> tensor { // CHECK: "vhlo.constant_v1"() <{ // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> 
@@ -1041,15 +1119,17 @@ func.func @op_constant(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_convert" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_convert(%arg0: tensor) -> tensor { - // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_convolution" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { - // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -1083,8 +1163,9 @@ func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16 } // CHECK-LABEL: "op_cosine" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cosine(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor func.return %0 : tensor } @@ -1097,8 +1178,9 @@ func.func @op_create_token() -> !stablehlo.token { } // CHECK-LABEL: "op_cross_replica_sum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ + // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cross-replica-sum"(%arg0) { @@ -1108,8 +1190,9 @@ func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_custom_call" 
+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_custom_call(%arg0: tensor) -> tensor { - // CHECK: "vhlo.custom_call_v1"(%arg0) <{ + // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ // CHECK-SAME: api_version = #vhlo, // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, @@ -1140,15 +1223,17 @@ func.func @op_custom_call(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_divide" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dot_general" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { - // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, @@ -1168,8 +1253,9 @@ func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) } // CHECK-LABEL: "op_dot" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.dot"(%arg0, 
%arg1) { @@ -1179,8 +1265,9 @@ func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x } // CHECK-LABEL: "op_dynamic_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> @@ -1194,8 +1281,9 @@ func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xi } // CHECK-LABEL: "op_dynamic_conv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<2x2xi32>) -> tensor<1x?x?x16xf32> { - // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -1228,8 +1316,9 @@ func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x1 } // CHECK-LABEL: "op_dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { - // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: 
indices_are_sorted = #vhlo.bool_v1, @@ -1249,8 +1338,9 @@ func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32 } // CHECK-LABEL: "op_dynamic_iota" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ + // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_iota"(%arg0) { @@ -1260,22 +1350,25 @@ func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { } // CHECK-LABEL: "op_dynamic_pad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dynamic_reshape" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : 
(!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dynamic_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { - // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> %0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { @@ -1285,15 +1378,17 @@ func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor } // CHECK-LABEL: "op_dynamic_update_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> + // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> func.return %0 : tensor<16xf32> } // CHECK-LABEL: "op_einsum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, 
!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.einsum"(%arg0, %arg1) { @@ -1303,22 +1398,25 @@ func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor } // CHECK-LABEL: "op_exponential_minus_one" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { - // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_exponential" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_exponential(%arg0: tensor) -> tensor { - // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_fft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - // CHECK: "vhlo.fft_v1"(%arg0) <{ + // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: fft_type = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> @@ -1330,8 +1428,9 @@ func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { } // CHECK-LABEL: "op_floor" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_floor(%arg0: tensor) -> tensor { - // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor func.return %0 : tensor } @@ -1344,16 +1443,17 @@ func.func private @op_func(%arg0: tensor {stablehlo.arg = "0"}) -> (tensor< // CHECK-SAME: sym_name = 
#vhlo.string_v1<"op_func">, // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : () -> () func.return %arg0 : tensor } // CHECK-LABEL: "op_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { - // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -1375,8 +1475,9 @@ func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> te } // CHECK-LABEL: "op_get_dimension_size" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_get_dimension_size(%arg0: tensor) -> tensor { - // CHECK: "vhlo.get_dimension_size_v1"(%arg0) <{ + // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.get_dimension_size"(%arg0) { @@ -1386,8 +1487,9 @@ func.func @op_get_dimension_size(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_get_tuple_element" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { - // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ + // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.get_tuple_element"(%arg0) { @@ -1397,11 +1499,12 @@ func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tenso } // 
CHECK-LABEL: "op_if" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.if_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }, { - // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.if"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> () @@ -1412,15 +1515,17 @@ func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> t } // CHECK-LABEL: "op_imag" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_imag(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_infeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.infeed_v1"(%arg0) <{ + // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) @@ -1443,36 +1548,41 @@ func.func @op_iota() -> tensor<16xf32> { } // CHECK-LABEL: "op_is_finite" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_is_finite(%arg0: tensor) -> tensor { - // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_log" +// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_log(%arg0: tensor) -> tensor { - // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_log_plus_one" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_log_plus_one(%arg0: tensor) -> tensor { - // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_logistic" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_logistic(%arg0: tensor) -> tensor { - // CHECK: "vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_map" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.map_v1"(%arg0) <{ + // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> ({ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): @@ -1490,57 +1600,65 @@ func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_maximum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_minimum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, 
%[[ARG1:.*]]: {{.*}}) func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_multiply" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_negate" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_negate(%arg0: tensor) -> tensor { - // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_not" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_not(%arg0: tensor) -> tensor { - // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_optimization_barrier" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_optimization_barrier(%arg0: tensor) -> tensor { - // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_or" +// CHECK-NEXT: (%[[ARG0:.*]]: 
{{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_outfeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.outfeed"(%arg0, %arg1) { @@ -1550,8 +1668,9 @@ func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo } // CHECK-LABEL: "op_pad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> @@ -1565,36 +1684,41 @@ func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { } // CHECK-LABEL: "op_popcnt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_popcnt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_power" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: 
"vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_real_dynamic_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_real" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_real(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_recv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.recv_v1"(%arg0) <{ + // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<3 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -1607,8 +1731,9 @@ func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { } // 
CHECK-LABEL: "op_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { - // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) + // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -1623,8 +1748,9 @@ func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_reduce_precision" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reduce_precision(%arg0: tensor) -> tensor { - // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -1636,8 +1762,9 @@ func.func @op_reduce_precision(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_reduce_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> @@ -1661,8 +1788,9 @@ func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_reduce_window" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { - // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, // 
CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, @@ -1688,8 +1816,9 @@ func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> } // CHECK-LABEL: "op_remainder" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } @@ -1709,16 +1838,18 @@ func.func @op_partition_id() -> tensor { } // CHECK-LABEL: "op_reshape" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { - // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> + // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> func.return %0 : tensor<4x4xf32> } // CHECK-LABEL: "op_return" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.case_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.case"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> () @@ -1727,8 +1858,9 @@ func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_reverse" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reverse_v1"(%arg0) <{ + // 
CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.reverse"(%arg0) { @@ -1738,8 +1870,9 @@ func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_rng_bit_generator" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { - // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ + // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ // CHECK-SAME: rng_algorithm = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, !vhlo.tensor_v1) %0:2 = "stablehlo.rng_bit_generator"(%arg0) { @@ -1749,8 +1882,9 @@ func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor } // CHECK-LABEL: "op_rng" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: rng_distribution = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { @@ -1760,29 +1894,33 @@ func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex> } // CHECK-LABEL: "op_round_nearest_afz" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { - // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_round_nearest_even" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_round_nearest_even(%arg0: tensor) -> tensor { - // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : 
(!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_rsqrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_rsqrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { - // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, @@ -1812,8 +1950,9 @@ func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, % } // CHECK-LABEL: "op_select_and_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { - // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> @@ -1843,15 +1982,17 @@ func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<1 } // CHECK-LABEL: "op_select" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, 
%[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_send" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -1864,8 +2005,9 @@ func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.to } // CHECK-LABEL: "op_set_dimension_size" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { @@ -1875,43 +2017,49 @@ func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> te } // CHECK-LABEL: "op_shift_left" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = 
"stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_shift_right_arithmetic" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_shift_right_logical" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_sign" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sign(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_sine" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sine(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { - // CHECK: "vhlo.slice_v1"(%arg0) <{ + // CHECK: 
"vhlo.slice_v1"(%[[ARG0]]) <{ // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> @@ -1925,8 +2073,9 @@ func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { } // CHECK-LABEL: "op_sort" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.sort_v1"(%arg0) <{ + // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: is_stable = #vhlo.bool_v1 // CHECK-SAME: }> ({ @@ -1946,29 +2095,33 @@ func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_sqrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sqrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_subtract" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_tanh" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_tanh(%arg0: tensor) -> tensor { - // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_torch_index_select" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_torch_index_select(%arg0: 
tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { - // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> @@ -1980,8 +2133,9 @@ func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) } // CHECK-LABEL: "op_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { - // CHECK: "vhlo.transpose_v1"(%arg0) <{ + // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> %0 = "stablehlo.transpose"(%arg0) { @@ -1991,8 +2145,9 @@ func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { } // CHECK-LABEL: "op_triangular_solve" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: left_side = #vhlo.bool_v1, // CHECK-SAME: lower = #vhlo.bool_v1, // CHECK-SAME: transpose_a = #vhlo, @@ -2008,15 +2163,17 @@ func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32 } // CHECK-LABEL: "op_tuple" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_tuple(%arg0: tensor) -> tuple> { - // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> + // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> func.return %0 : tuple> } // CHECK-LABEL: "op_unary_einsum" +// CHECK-NEXT: (%[[ARG0:.*]]: 
{{.*}}) func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { - // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ + // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> %0 = "stablehlo.unary_einsum"(%arg0) { @@ -2026,22 +2183,25 @@ func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { } // CHECK-LABEL: "op_uniform_dequantize" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_uniform_quantize" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { - // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> + // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "op_while" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_while(%arg0: tensor) -> tensor { - // CHECK: "vhlo.while_v1"(%arg0) ({ + // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }, { @@ -2059,8 +2219,9 @@ func.func @op_while(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_xor" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, 
!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } @@ -2068,190 +2229,217 @@ func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { // ============ TYPES ============ // CHECK-LABEL: "type_i1" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i4" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i8" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor 
} // CHECK-LABEL: "type_i32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui4" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui8" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> 
!vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3FN" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E5M2" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // 
CHECK-LABEL: "type_f8E4M3FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3B11FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3B11FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E5M2FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_bf16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : 
(!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_complex_f32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "type_complex_f64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, 
%arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "type_dynamism_ranked" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { - // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_quantization" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> // CHECK-LABEL: "type_token_callee" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () + // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () return %arg0 : !stablehlo.token } // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> // CHECK-LABEL: "type_token_caller" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} + // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token return %0 : !stablehlo.token } // CHECK-LABEL: "type_tuple" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_tuple(%arg0: tuple>) -> tuple { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo" diff --git 
a/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_17_0.mlir b/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_17_0.mlir index aca6160f7ee..6ecd6c27281 100644 --- a/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_17_0.mlir +++ b/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_17_0.mlir @@ -13,6 +13,7 @@ // ============ ATTRIBUTES ============ // CHECK-LABEL: "attr_comparison_direction_eq" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -22,6 +23,7 @@ func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_ne" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -31,6 +33,7 @@ func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_ge" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -40,6 +43,7 @@ func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_gt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -49,6 +53,7 @@ func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_le" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { %0 = 
"stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -58,6 +63,7 @@ func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_lt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -67,6 +73,7 @@ func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_type_notype" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo @@ -76,6 +83,7 @@ func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) - } // CHECK-LABEL: "attr_comparison_type_float" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -86,6 +94,7 @@ func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> } // CHECK-LABEL: "attr_comparison_type_totalorder" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -96,6 +105,7 @@ func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -106,6 +116,7 @@ func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) - } // CHECK-LABEL: "attr_comparison_type_unsigned" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { %0 = 
"stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -118,6 +129,7 @@ func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) // ConvDimensionNumbers aka #stablehlo.conv is covered below. // CHECK-LABEL: "attr_custom_call_api_version_unspecified" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -128,6 +140,7 @@ func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tenso } // CHECK-LABEL: "attr_custom_call_api_version_original" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -138,6 +151,7 @@ func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -148,6 +162,7 @@ func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> } // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -166,6 +181,7 @@ func.func @attr_dict() attributes {stablehlo.attr = {attr1 = 1 : i32, attr2 = 2 // DotDimensionNumbers aka #stablehlo.dot is covered below. 
// CHECK-LABEL: "attr_fft_type_fft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -176,6 +192,7 @@ func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomple } // CHECK-LABEL: "attr_fft_type_ifft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -186,6 +203,7 @@ func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcompl } // CHECK-LABEL: "attr_fft_type_rfft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -196,6 +214,7 @@ func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { } // CHECK-LABEL: "attr_fft_type_irfft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -208,6 +227,7 @@ func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> // GatherDimensionNumbers aka #stablehlo.gather is covered below. 
// CHECK-LABEL: "attr_precision_config_default" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -216,6 +236,7 @@ func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor< } // CHECK-LABEL: "attr_precision_config_high" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -225,6 +246,7 @@ func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x } // CHECK-LABEL: "attr_precision_config_highest" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -234,6 +256,7 @@ func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor< } // CHECK-LABEL: "attr_rng_algorithm_default" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -243,6 +266,7 @@ func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tenso } // CHECK-LABEL: "attr_rng_algorithm_three_fry" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -252,6 +276,7 @@ func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, ten } // CHECK-LABEL: "attr_rng_algorithm_philox" +// CHECK-NEXT: (%[[ARG0:.*]]: 
{{.*}}) func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -261,6 +286,7 @@ func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor } // CHECK-LABEL: "attr_rng_distribution_uniform" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { // CHECK: rng_distribution = #vhlo @@ -270,6 +296,7 @@ func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, } // CHECK-LABEL: "attr_rng_distribution_normal" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { // CHECK: rng_distribution = #vhlo @@ -281,6 +308,7 @@ func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. 
// CHECK-LABEL: "attr_transpose_no_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -293,6 +321,7 @@ func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<1 } // CHECK-LABEL: "attr_transpose_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -305,6 +334,7 @@ func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x1 } // CHECK-LABEL: "attr_transpose_adjoint" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -319,10 +349,9 @@ func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16x // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. 
// CHECK-LABEL: "attr_type_extensions_bounds" -func.func @attr_type_extensions_bounds( - %arg0: tensor>) - -> tensor> { - // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) +func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { + // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () func.return %arg0 : tensor> } @@ -330,8 +359,9 @@ func.func @attr_type_extensions_bounds( // ============ DEFAULTS ============ // CHECK-LABEL: "default_all_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.all_gather_v1"(%arg0) <{ + // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -345,8 +375,9 @@ func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "default_all_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_reduce(%arg0: tensor) -> tensor { - // CHECK: "vhlo.all_reduce_v1"(%arg0) + // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) // CHECK-SAME: <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -368,8 +399,9 @@ func.func @default_all_reduce(%arg0: tensor) -> tensor { } // CHECK-LABEL: "default_all_to_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { - // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ + // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, @@ -386,8 +418,9 @@ func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { 
} // CHECK-LABEL: "default_cholesky" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { - // CHECK: "vhlo.cholesky_v1"(%arg0) <{ + // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ // CHECK-SAME: lower = #vhlo.bool_v1 // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> @@ -395,8 +428,9 @@ func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { } // CHECK-LABEL: "default_collective_permute" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { - // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ + // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> @@ -407,8 +441,9 @@ func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf3 } // CHECK-LABEL: "default_collective_broadcast" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_collective_broadcast(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { - // CHECK: "vhlo.collective_broadcast_v1"(%arg0) <{ + // CHECK: "vhlo.collective_broadcast_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> @@ -419,8 +454,9 @@ func.func @default_collective_broadcast(%arg0: tensor<16x8xf32>) -> tensor<16x8x } // CHECK-LABEL: "default_compare" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ + 
// CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: compare_type = #vhlo, // CHECK-SAME: comparison_direction = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -431,8 +467,9 @@ func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor } // CHECK-LABEL: "default_convolution" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { - // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -460,8 +497,9 @@ func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x2 } // CHECK-LABEL: "default_custom_call" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_custom_call(%arg0: tensor) -> tensor { - // CHECK: "vhlo.custom_call_v1"(%arg0) <{ + // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ // CHECK-SAME: api_version = #vhlo, // CHECK-SAME: backend_config = #vhlo.string_v1<"">, // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, @@ -478,8 +516,9 @@ func.func @default_custom_call(%arg0: tensor) -> tensor { } // CHECK-LABEL: "default_dot_general" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { - // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, @@ -498,8 +537,9 @@ func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: 
tensor<8x16x8xf } // CHECK-LABEL: "default_dot" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> @@ -507,8 +547,9 @@ func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tens } // CHECK-LABEL: "default_dynamic_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>> @@ -520,8 +561,9 @@ func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tenso } // CHECK-LABEL: "default_dynamic_conv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<2x2xi32>) -> tensor<1x?x?x16xf32> { - // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -549,8 +591,9 @@ func.func 
@default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x } // CHECK-LABEL: "default_dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { - // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -576,15 +619,16 @@ func.func @default_func(%arg0: tensor) -> tensor { // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : () -> () func.return %arg0 : tensor } // CHECK-LABEL: "dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { - // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -605,8 +649,9 @@ func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) } // CHECK-LABEL: "default_infeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.infeed_v1"(%arg0) <{ + // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ // 
CHECK-SAME: infeed_config = #vhlo.string_v1<"">, // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) @@ -615,8 +660,9 @@ func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.t } // CHECK-LABEL: "default_outfeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token @@ -624,8 +670,9 @@ func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stab } // CHECK-LABEL: "default_recv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.recv_v1"(%arg0) <{ + // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -637,8 +684,9 @@ func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.tok } // CHECK-LABEL: "default_send" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -650,8 +698,9 @@ func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stableh } // CHECK-LABEL: "default_reduce_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func 
@default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> @@ -673,8 +722,9 @@ func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "default_reduce_window" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x16x30x7xf32> { - // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, @@ -696,8 +746,9 @@ func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { - // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, @@ -725,8 +776,9 @@ func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi3 } // CHECK-LABEL: "default_select_and_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { - // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: 
padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> @@ -754,8 +806,9 @@ func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: ten } // CHECK-LABEL: "default_sort" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.sort_v1"(%arg0) <{ + // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> // CHECK-SAME: is_stable = #vhlo.bool_v1 // CHECK-SAME: }> ({ @@ -774,29 +827,33 @@ func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { // ============ OPS ============ // CHECK-LABEL: "op_abs" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_abs(%arg0: tensor) -> tensor { - // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_add" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_after_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 + // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token func.return %0 : !stablehlo.token } // CHECK-LABEL: "op_all_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func 
@op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.all_gather_v1"(%arg0) <{ + // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -812,8 +869,9 @@ func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_all_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_reduce(%arg0: tensor) -> tensor { - // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ + // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 @@ -836,7 +894,7 @@ func.func @op_all_reduce(%arg0: tensor) -> tensor { // CHECK-LABEL: "op_all_reduce_with_promotable_types" func.func @op_all_reduce_with_promotable_types(%operand: tensor) -> tensor { - // CHECK: "vhlo.all_reduce_v1"(%arg0) + // CHECK: "vhlo.all_reduce_v1"(%[[ARG0:.*]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -854,8 +912,9 @@ func.func @op_all_reduce_with_promotable_types(%operand: tensor) -> tensor< } // CHECK-LABEL: "op_all_to_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { - // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ + // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, @@ -873,22 +932,25 @@ func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { } // CHECK-LABEL: "op_and" +// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_atan2" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_batch_norm_grad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { - // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ + // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) @@ -900,8 +962,9 @@ func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf } // CHECK-LABEL: "op_batch_norm_inference" 
+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { - // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ + // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> @@ -913,8 +976,9 @@ func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor } // CHECK-LABEL: "op_batch_norm_training" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { - // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) @@ -926,15 +990,17 @@ func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor< } // CHECK-LABEL: "op_bitcast_convert" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func 
@op_bitcast_convert(%arg0: tensor) -> tensor { - // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ + // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> %0 = "stablehlo.broadcast_in_dim"(%arg0) { @@ -944,8 +1010,9 @@ func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_broadcast" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.broadcast_v1"(%arg0) <{ + // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> %0 = "stablehlo.broadcast"(%arg0) { @@ -955,9 +1022,10 @@ func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_case" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.case_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.case"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> () @@ -966,22 +1034,25 @@ func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_cbrt" +// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cbrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_ceil" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_ceil(%arg0: tensor) -> tensor { - // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_cholesky" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { - // CHECK: "vhlo.cholesky_v1"(%arg0) <{ + // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ // CHECK-SAME: lower = #vhlo.bool_v1 // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> %0 = "stablehlo.cholesky"(%arg0) { @@ -991,22 +1062,25 @@ func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { } // CHECK-LABEL: "op_clamp" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_count_leading_zeros" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { - // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : 
(!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_collective_permute" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { - // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ + // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> @@ -1018,8 +1092,9 @@ func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { } // CHECK-LABEL: "op_compare" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: compare_type = #vhlo, // CHECK-SAME: comparison_direction = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -1031,15 +1106,17 @@ func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_complex" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { - // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> + // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "op_concatenate" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: 
dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.concatenate"(%arg0, %arg1) { @@ -1049,6 +1126,7 @@ func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor< } // CHECK-LABEL: "op_constant" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_constant(%arg0: tensor) -> tensor { // CHECK: "vhlo.constant_v1"() <{ // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> @@ -1060,15 +1138,17 @@ func.func @op_constant(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_convert" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_convert(%arg0: tensor) -> tensor { - // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_convolution" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { - // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -1102,8 +1182,9 @@ func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16 } // CHECK-LABEL: "op_cosine" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cosine(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor func.return %0 : tensor } @@ -1116,8 +1197,9 @@ func.func @op_create_token() -> !stablehlo.token { } // CHECK-LABEL: 
"op_cross_replica_sum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ + // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cross-replica-sum"(%arg0) { @@ -1127,8 +1209,9 @@ func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_custom_call" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_custom_call(%arg0: tensor) -> tensor { - // CHECK: "vhlo.custom_call_v1"(%arg0) <{ + // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ // CHECK-SAME: api_version = #vhlo, // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, @@ -1159,15 +1242,17 @@ func.func @op_custom_call(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_divide" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dot_general" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { - // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, @@ -1187,8 +1272,9 @@ func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: 
tensor<8x16x8xf32>) } // CHECK-LABEL: "op_dot" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.dot"(%arg0, %arg1) { @@ -1198,8 +1284,9 @@ func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x } // CHECK-LABEL: "op_dynamic_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> @@ -1213,8 +1300,9 @@ func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xi } // CHECK-LABEL: "op_dynamic_conv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<2x2xi32>) -> tensor<1x?x?x16xf32> { - // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -1247,8 +1335,9 @@ func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x1 } // CHECK-LABEL: 
"op_dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { - // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -1268,8 +1357,9 @@ func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32 } // CHECK-LABEL: "op_dynamic_iota" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ + // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_iota"(%arg0) { @@ -1279,22 +1369,25 @@ func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { } // CHECK-LABEL: "op_dynamic_pad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : 
(tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dynamic_reshape" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dynamic_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { - // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> %0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { @@ -1304,15 +1397,17 @@ func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor } // CHECK-LABEL: "op_dynamic_update_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> + // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, 
%arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> func.return %0 : tensor<16xf32> } // CHECK-LABEL: "op_einsum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.einsum"(%arg0, %arg1) { @@ -1322,22 +1417,25 @@ func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor } // CHECK-LABEL: "op_exponential_minus_one" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { - // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_exponential" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_exponential(%arg0: tensor) -> tensor { - // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_fft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - // CHECK: "vhlo.fft_v1"(%arg0) <{ + // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: fft_type = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> @@ -1349,8 +1447,9 @@ func.func @op_fft(%arg0: tensor<16xcomplex>) -> 
tensor<16xcomplex> { } // CHECK-LABEL: "op_floor" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_floor(%arg0: tensor) -> tensor { - // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor func.return %0 : tensor } @@ -1363,16 +1462,17 @@ func.func private @op_func(%arg0: tensor {stablehlo.arg = "0"}) -> (tensor< // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : () -> () func.return %arg0 : tensor } // CHECK-LABEL: "op_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { - // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -1394,8 +1494,9 @@ func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> te } // CHECK-LABEL: "op_get_dimension_size" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_get_dimension_size(%arg0: tensor) -> tensor { - // CHECK: "vhlo.get_dimension_size_v1"(%arg0) <{ + // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.get_dimension_size"(%arg0) { @@ -1405,8 +1506,9 @@ func.func @op_get_dimension_size(%arg0: tensor) -> tensor { } // CHECK-LABEL: 
"op_get_tuple_element" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { - // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ + // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.get_tuple_element"(%arg0) { @@ -1416,11 +1518,12 @@ func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tenso } // CHECK-LABEL: "op_if" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.if_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }, { - // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.if"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> () @@ -1431,15 +1534,17 @@ func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> t } // CHECK-LABEL: "op_imag" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_imag(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_infeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.infeed_v1"(%arg0) <{ + // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> // CHECK-SAME: }> : (!vhlo.token_v1) -> 
(!vhlo.tensor_v1, !vhlo.token_v1) @@ -1462,36 +1567,41 @@ func.func @op_iota() -> tensor<16xf32> { } // CHECK-LABEL: "op_is_finite" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_is_finite(%arg0: tensor) -> tensor { - // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_log" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_log(%arg0: tensor) -> tensor { - // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_log_plus_one" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_log_plus_one(%arg0: tensor) -> tensor { - // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_logistic" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_logistic(%arg0: tensor) -> tensor { - // CHECK: "vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_map" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.map_v1"(%arg0) <{ + // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> ({ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): @@ -1509,57 +1619,65 @@ func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_maximum" +// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_minimum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_multiply" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_negate" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_negate(%arg0: tensor) -> tensor { - // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_not" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_not(%arg0: tensor) -> tensor { - // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor 
func.return %0 : tensor } // CHECK-LABEL: "op_optimization_barrier" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_optimization_barrier(%arg0: tensor) -> tensor { - // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_or" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_outfeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.outfeed"(%arg0, %arg1) { @@ -1569,8 +1687,9 @@ func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo } // CHECK-LABEL: "op_pad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> @@ -1584,36 +1703,41 @@ func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { } // CHECK-LABEL: 
"op_popcnt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_popcnt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_power" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_real_dynamic_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_real" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_real(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } 
// CHECK-LABEL: "op_recv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.recv_v1"(%arg0) <{ + // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<3 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -1626,8 +1750,9 @@ func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { } // CHECK-LABEL: "op_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { - // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) + // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -1642,8 +1767,9 @@ func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_reduce_precision" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reduce_precision(%arg0: tensor) -> tensor { - // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -1657,7 +1783,7 @@ func.func @op_reduce_precision(%arg0: tensor) -> tensor { // CHECK_lABEL: "op_reduce_with_promotable_types" func.func @op_reduce_with_promotable_types(%arg0: tensor<4x4xf32>, %arg1 : tensor) -> (tensor<4xf64>) { - // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) + // CHECK: "vhlo.reduce_v1"(%[[ARG0:.*]], %[[ARG1:.*]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : 
(!vhlo.tensor_v1<4x4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f64_v1> @@ -1672,8 +1798,9 @@ func.func @op_reduce_with_promotable_types(%arg0: tensor<4x4xf32>, %arg1 : tenso } // CHECK-LABEL: "op_reduce_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> @@ -1698,7 +1825,7 @@ func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { // CHECK_lABEL: "op_reduce_scatter_with_promotable_types" func.func @op_reduce_scatter_with_promotable_types(%data: tensor<4x16xf32>) -> tensor<4x4xf64> { - // CHECK: "vhlo.reduce_scatter_v1"(%arg0) + // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0:.*]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1<4x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f64_v1> @@ -1715,8 +1842,9 @@ func.func @op_reduce_scatter_with_promotable_types(%data: tensor<4x16xf32>) -> t // CHECK-LABEL: "op_reduce_window" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { - // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, @@ -1741,11 +1869,11 @@ func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> func.return %0 : tensor<2x9x16x7xf32> } -// CHECK_lABEL: 
"op_reduce_window_with_promotable_types" +// CHECK-LABEL: "op_reduce_window_with_promotable_types" func.func @op_reduce_window_with_promotable_types(%arg0: tensor<4x2xf32>, %arg1: tensor<4x2xf32>, %init0: tensor, %init1: tensor) -> (tensor<2x2xf64>, tensor<2x2xf32>) { - // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1, %arg2, %arg3) + // CHECK: "vhlo.reduce_window_v1"(%[[ARG0:.*]], %[[ARG1:.*]], %[[ARG2:.*]], %[[ARG3:.*]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1, %[[ARG3:arg.*]]: !vhlo.tensor_v1, %[[ARG4:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]], %[[VAL2:.*]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1<4x2x!vhlo.f32_v1>, !vhlo.tensor_v1<4x2x!vhlo.f32_v1>, !vhlo.tensor_v1, !vhlo.tensor_v1) -> (!vhlo.tensor_v1<2x2x!vhlo.f64_v1>, !vhlo.tensor_v1<2x2x!vhlo.f32_v1>) @@ -1765,8 +1893,9 @@ func.func @op_reduce_window_with_promotable_types(%arg0: tensor<4x2xf32>, } // CHECK-LABEL: "op_remainder" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } @@ -1786,16 +1915,18 @@ func.func @op_partition_id() -> tensor { } // CHECK-LABEL: "op_reshape" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { - // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> + // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> func.return %0 : tensor<4x4xf32> } // CHECK-LABEL: 
"op_return" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.case_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.case"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> () @@ -1804,8 +1935,9 @@ func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_reverse" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reverse_v1"(%arg0) <{ + // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.reverse"(%arg0) { @@ -1815,8 +1947,9 @@ func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_rng_bit_generator" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { - // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ + // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ // CHECK-SAME: rng_algorithm = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, !vhlo.tensor_v1) %0:2 = "stablehlo.rng_bit_generator"(%arg0) { @@ -1826,8 +1959,9 @@ func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor } // CHECK-LABEL: "op_rng" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: rng_distribution = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> 
!vhlo.tensor_v1 %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { @@ -1837,29 +1971,33 @@ func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex> } // CHECK-LABEL: "op_round_nearest_afz" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { - // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_round_nearest_even" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_round_nearest_even(%arg0: tensor) -> tensor { - // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_rsqrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_rsqrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { - // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, @@ -1892,7 +2030,7 @@ func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, % func.func 
@op_scatter_with_promotable_types(%input_tensor: tensor<200x100x300xf32>, %scatter_indices: tensor<10x2xi32>, %updates: tensor<10x300xf32>) -> tensor<200x100x300xf64> { - // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) + // CHECK: "vhlo.scatter_v1"(%[[ARG0:.*]], %[[ARG1:.*]], %[[ARG2:.*]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1<200x100x300x!vhlo.f32_v1>, !vhlo.tensor_v1<10x2x!vhlo.i32_v1>, !vhlo.tensor_v1<10x300x!vhlo.f32_v1>) -> !vhlo.tensor_v1<200x100x300x!vhlo.f64_v1> @@ -1915,8 +2053,9 @@ func.func @op_scatter_with_promotable_types(%input_tensor: tensor<200x100x300xf3 } // CHECK-LABEL: "op_select_and_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { - // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> @@ -1946,8 +2085,9 @@ func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<1 } // CHECK-LABEL: "op_select_and_scatter_with_promotable_types" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_select_and_scatter_with_promotable_types(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf64> { - // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) + // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: %[[VAL:.*]] = 
"vhlo.add_v1"(%[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 // CHECK: "vhlo.return_v1"(%[[VAL]]) : (!vhlo.tensor_v1) -> () @@ -1969,15 +2109,17 @@ func.func @op_select_and_scatter_with_promotable_types(%arg0: tensor<10x24x24x64 } // CHECK-LABEL: "op_select" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_send" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -1990,8 +2132,9 @@ func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.to } // CHECK-LABEL: "op_set_dimension_size" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { @@ -2001,43 +2144,49 @@ func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> te } // CHECK-LABEL: "op_shift_left" +// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_shift_right_arithmetic" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_shift_right_logical" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_sign" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sign(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_sine" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sine(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // 
CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { - // CHECK: "vhlo.slice_v1"(%arg0) <{ + // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> @@ -2051,8 +2200,9 @@ func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { } // CHECK-LABEL: "op_sort" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.sort_v1"(%arg0) <{ + // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: is_stable = #vhlo.bool_v1 // CHECK-SAME: }> ({ @@ -2072,29 +2222,33 @@ func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_sqrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sqrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_subtract" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_tanh" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_tanh(%arg0: tensor) -> tensor { - // CHECK: "vhlo.tanh_v1"(%arg0) : 
(!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_torch_index_select" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { - // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> @@ -2106,8 +2260,9 @@ func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) } // CHECK-LABEL: "op_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { - // CHECK: "vhlo.transpose_v1"(%arg0) <{ + // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> %0 = "stablehlo.transpose"(%arg0) { @@ -2117,8 +2272,9 @@ func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { } // CHECK-LABEL: "op_triangular_solve" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: left_side = #vhlo.bool_v1, // CHECK-SAME: lower = #vhlo.bool_v1, // CHECK-SAME: transpose_a = #vhlo, @@ -2134,15 +2290,17 @@ func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32 } // CHECK-LABEL: "op_tuple" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func 
@op_tuple(%arg0: tensor) -> tuple> { - // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> + // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> func.return %0 : tuple> } // CHECK-LABEL: "op_unary_einsum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { - // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ + // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> %0 = "stablehlo.unary_einsum"(%arg0) { @@ -2152,22 +2310,25 @@ func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { } // CHECK-LABEL: "op_uniform_dequantize" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_uniform_quantize" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { - // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> + // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "op_while" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_while(%arg0: tensor) -> tensor { - // CHECK: "vhlo.while_v1"(%arg0) ({ + // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }, { @@ -2185,8 +2346,9 @@ func.func @op_while(%arg0: 
tensor) -> tensor { } // CHECK-LABEL: "op_xor" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } @@ -2194,190 +2356,217 @@ func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { // ============ TYPES ============ // CHECK-LABEL: "type_i1" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i4" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i8" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func 
@type_i16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui4" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui8" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> 
!vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3FN" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E5M2" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func 
@type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3B11FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3B11FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E5M2FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_bf16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], 
%[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_complex_f32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "type_complex_f64" +// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "type_dynamism_ranked" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { - // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_quantization" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> // CHECK-LABEL: "type_token_callee" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () + // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () return %arg0 : !stablehlo.token } // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> // CHECK-LABEL: "type_token_caller" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} + // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} // 
CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token return %0 : !stablehlo.token } // CHECK-LABEL: "type_tuple" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_tuple(%arg0: tuple>) -> tuple { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo" diff --git a/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_18_0.mlir b/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_18_0.mlir index 2c0208d480d..fca84ac4803 100644 --- a/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_18_0.mlir +++ b/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_18_0.mlir @@ -13,6 +13,7 @@ // ============ ATTRIBUTES ============ // CHECK-LABEL: "attr_comparison_direction_eq" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -22,6 +23,7 @@ func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_ne" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -31,6 +33,7 @@ func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_ge" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -40,6 +43,7 @@ func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_gt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { %0 = 
"stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -49,6 +53,7 @@ func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_le" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -58,6 +63,7 @@ func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_lt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -67,6 +73,7 @@ func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_type_notype" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo @@ -76,6 +83,7 @@ func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) - } // CHECK-LABEL: "attr_comparison_type_float" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -86,6 +94,7 @@ func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> } // CHECK-LABEL: "attr_comparison_type_totalorder" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -96,6 +105,7 @@ func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor, %arg1: tensor) -> tensor { %0 = 
"stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -106,6 +116,7 @@ func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) - } // CHECK-LABEL: "attr_comparison_type_unsigned" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -118,6 +129,7 @@ func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) // ConvDimensionNumbers aka #stablehlo.conv is covered below. // CHECK-LABEL: "attr_custom_call_api_version_unspecified" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -128,6 +140,7 @@ func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tenso } // CHECK-LABEL: "attr_custom_call_api_version_original" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -138,6 +151,7 @@ func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -148,6 +162,7 @@ func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> } // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -166,6 +181,7 @@ func.func @attr_dict() attributes {stablehlo.attr = {attr1 = 1 : i32, attr2 = 2 // DotDimensionNumbers aka #stablehlo.dot is covered below. 
// CHECK-LABEL: "attr_fft_type_fft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -176,6 +192,7 @@ func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomple } // CHECK-LABEL: "attr_fft_type_ifft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -186,6 +203,7 @@ func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcompl } // CHECK-LABEL: "attr_fft_type_rfft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -196,6 +214,7 @@ func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { } // CHECK-LABEL: "attr_fft_type_irfft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -208,6 +227,7 @@ func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> // GatherDimensionNumbers aka #stablehlo.gather is covered below. 
// CHECK-LABEL: "attr_precision_config_default" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -216,6 +236,7 @@ func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor< } // CHECK-LABEL: "attr_precision_config_high" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -225,6 +246,7 @@ func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x } // CHECK-LABEL: "attr_precision_config_highest" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -234,6 +256,7 @@ func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor< } // CHECK-LABEL: "attr_rng_algorithm_default" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -243,6 +266,7 @@ func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tenso } // CHECK-LABEL: "attr_rng_algorithm_three_fry" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -252,6 +276,7 @@ func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, ten } // CHECK-LABEL: "attr_rng_algorithm_philox" +// CHECK-NEXT: (%[[ARG0:.*]]: 
{{.*}}) func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -261,6 +286,7 @@ func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor } // CHECK-LABEL: "attr_rng_distribution_uniform" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { // CHECK: rng_distribution = #vhlo @@ -270,6 +296,7 @@ func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, } // CHECK-LABEL: "attr_rng_distribution_normal" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { // CHECK: rng_distribution = #vhlo @@ -281,6 +308,7 @@ func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. 
// CHECK-LABEL: "attr_transpose_no_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -293,6 +321,7 @@ func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<1 } // CHECK-LABEL: "attr_transpose_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -305,6 +334,7 @@ func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x1 } // CHECK-LABEL: "attr_transpose_adjoint" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -319,10 +349,9 @@ func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16x // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. 
// CHECK-LABEL: "attr_type_extensions_bounds" -func.func @attr_type_extensions_bounds( - %arg0: tensor>) - -> tensor> { - // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) +func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { + // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () func.return %arg0 : tensor> } @@ -330,8 +359,9 @@ func.func @attr_type_extensions_bounds( // ============ DEFAULTS ============ // CHECK-LABEL: "default_all_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.all_gather_v1"(%arg0) <{ + // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -345,8 +375,9 @@ func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "default_all_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_reduce(%arg0: tensor) -> tensor { - // CHECK: "vhlo.all_reduce_v1"(%arg0) + // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) // CHECK-SAME: <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -368,8 +399,9 @@ func.func @default_all_reduce(%arg0: tensor) -> tensor { } // CHECK-LABEL: "default_all_to_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { - // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ + // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, @@ -386,8 +418,9 @@ func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { 
} // CHECK-LABEL: "default_cholesky" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { - // CHECK: "vhlo.cholesky_v1"(%arg0) <{ + // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ // CHECK-SAME: lower = #vhlo.bool_v1 // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> @@ -395,8 +428,9 @@ func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { } // CHECK-LABEL: "default_collective_permute" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { - // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ + // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> @@ -407,8 +441,9 @@ func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf3 } // CHECK-LABEL: "default_collective_broadcast" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_collective_broadcast(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { - // CHECK: "vhlo.collective_broadcast_v1"(%arg0) <{ + // CHECK: "vhlo.collective_broadcast_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> @@ -419,8 +454,9 @@ func.func @default_collective_broadcast(%arg0: tensor<16x8xf32>) -> tensor<16x8x } // CHECK-LABEL: "default_compare" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ + 
// CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: compare_type = #vhlo, // CHECK-SAME: comparison_direction = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -431,8 +467,9 @@ func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor } // CHECK-LABEL: "default_convolution" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { - // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -460,8 +497,9 @@ func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x2 } // CHECK-LABEL: "default_custom_call" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_custom_call(%arg0: tensor) -> tensor { - // CHECK: "vhlo.custom_call_v1"(%arg0) <{ + // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ // CHECK-SAME: api_version = #vhlo, // CHECK-SAME: backend_config = #vhlo.string_v1<"">, // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, @@ -478,8 +516,9 @@ func.func @default_custom_call(%arg0: tensor) -> tensor { } // CHECK-LABEL: "default_dot_general" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { - // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, @@ -498,8 +537,9 @@ func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: 
tensor<8x16x8xf } // CHECK-LABEL: "default_dot" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> @@ -507,8 +547,9 @@ func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tens } // CHECK-LABEL: "default_dynamic_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>> @@ -520,8 +561,9 @@ func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tenso } // CHECK-LABEL: "default_dynamic_conv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<2x2xi32>) -> tensor<1x?x?x16xf32> { - // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -549,8 +591,9 @@ func.func 
@default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x } // CHECK-LABEL: "default_dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { - // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -576,15 +619,16 @@ func.func @default_func(%arg0: tensor) -> tensor { // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : () -> () func.return %arg0 : tensor } // CHECK-LABEL: "dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { - // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -605,8 +649,9 @@ func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) } // CHECK-LABEL: "default_infeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.infeed_v1"(%arg0) <{ + // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ // 
CHECK-SAME: infeed_config = #vhlo.string_v1<"">, // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) @@ -615,8 +660,9 @@ func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.t } // CHECK-LABEL: "default_outfeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token @@ -624,8 +670,9 @@ func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stab } // CHECK-LABEL: "default_recv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.recv_v1"(%arg0) <{ + // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -637,8 +684,9 @@ func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.tok } // CHECK-LABEL: "default_send" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -650,8 +698,9 @@ func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stableh } // CHECK-LABEL: "default_reduce_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func 
@default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> @@ -673,8 +722,9 @@ func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "default_reduce_window" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x16x30x7xf32> { - // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, @@ -696,8 +746,9 @@ func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { - // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, @@ -725,8 +776,9 @@ func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi3 } // CHECK-LABEL: "default_select_and_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { - // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: 
padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> @@ -754,8 +806,9 @@ func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: ten } // CHECK-LABEL: "default_sort" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.sort_v1"(%arg0) <{ + // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> // CHECK-SAME: is_stable = #vhlo.bool_v1 // CHECK-SAME: }> ({ @@ -774,29 +827,33 @@ func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { // ============ OPS ============ // CHECK-LABEL: "op_abs" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_abs(%arg0: tensor) -> tensor { - // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_add" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_after_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 + // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token func.return %0 : !stablehlo.token } // CHECK-LABEL: "op_all_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func 
@op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.all_gather_v1"(%arg0) <{ + // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -812,8 +869,9 @@ func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_all_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_reduce(%arg0: tensor) -> tensor { - // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ + // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 @@ -836,7 +894,7 @@ func.func @op_all_reduce(%arg0: tensor) -> tensor { // CHECK-LABEL: "op_all_reduce_with_promotable_types" func.func @op_all_reduce_with_promotable_types(%operand: tensor) -> tensor { - // CHECK: "vhlo.all_reduce_v1"(%arg0) + // CHECK: "vhlo.all_reduce_v1"(%[[ARG0:.*]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -854,8 +912,9 @@ func.func @op_all_reduce_with_promotable_types(%operand: tensor) -> tensor< } // CHECK-LABEL: "op_all_to_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { - // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ + // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, @@ -873,22 +932,25 @@ func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { } // CHECK-LABEL: "op_and" +// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_atan2" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_batch_norm_grad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { - // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ + // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) @@ -900,8 +962,9 @@ func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf } // CHECK-LABEL: "op_batch_norm_inference" 
+// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { - // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ + // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> @@ -913,8 +976,9 @@ func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor } // CHECK-LABEL: "op_batch_norm_training" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { - // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) @@ -926,15 +990,17 @@ func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor< } // CHECK-LABEL: "op_bitcast_convert" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func 
@op_bitcast_convert(%arg0: tensor) -> tensor { - // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ + // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> %0 = "stablehlo.broadcast_in_dim"(%arg0) { @@ -944,8 +1010,9 @@ func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_broadcast" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.broadcast_v1"(%arg0) <{ + // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> %0 = "stablehlo.broadcast"(%arg0) { @@ -955,9 +1022,10 @@ func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_case" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.case_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.case"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> () @@ -966,22 +1034,25 @@ func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_cbrt" +// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cbrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_ceil" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_ceil(%arg0: tensor) -> tensor { - // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_cholesky" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { - // CHECK: "vhlo.cholesky_v1"(%arg0) <{ + // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ // CHECK-SAME: lower = #vhlo.bool_v1 // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> %0 = "stablehlo.cholesky"(%arg0) { @@ -991,22 +1062,25 @@ func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { } // CHECK-LABEL: "op_clamp" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_count_leading_zeros" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { - // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : 
(!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_collective_permute" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { - // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ + // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> @@ -1018,8 +1092,9 @@ func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { } // CHECK-LABEL: "op_compare" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: compare_type = #vhlo, // CHECK-SAME: comparison_direction = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -1031,15 +1106,17 @@ func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_complex" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { - // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> + // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "op_concatenate" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: 
dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.concatenate"(%arg0, %arg1) { @@ -1049,6 +1126,7 @@ func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor< } // CHECK-LABEL: "op_constant" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_constant(%arg0: tensor) -> tensor { // CHECK: "vhlo.constant_v1"() <{ // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> @@ -1060,15 +1138,17 @@ func.func @op_constant(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_convert" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_convert(%arg0: tensor) -> tensor { - // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_convolution" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { - // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -1102,8 +1182,9 @@ func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16 } // CHECK-LABEL: "op_cosine" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cosine(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor func.return %0 : tensor } @@ -1116,8 +1197,9 @@ func.func @op_create_token() -> !stablehlo.token { } // CHECK-LABEL: 
"op_cross_replica_sum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ + // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cross-replica-sum"(%arg0) { @@ -1127,8 +1209,9 @@ func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_custom_call" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_custom_call(%arg0: tensor) -> tensor { - // CHECK: "vhlo.custom_call_v1"(%arg0) <{ + // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ // CHECK-SAME: api_version = #vhlo, // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, @@ -1159,15 +1242,17 @@ func.func @op_custom_call(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_divide" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dot_general" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { - // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, @@ -1187,8 +1272,9 @@ func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: 
tensor<8x16x8xf32>) } // CHECK-LABEL: "op_dot" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.dot"(%arg0, %arg1) { @@ -1198,8 +1284,9 @@ func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x } // CHECK-LABEL: "op_dynamic_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> @@ -1213,8 +1300,9 @@ func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xi } // CHECK-LABEL: "op_dynamic_conv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<2x2xi32>) -> tensor<1x?x?x16xf32> { - // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -1247,8 +1335,9 @@ func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x1 } // CHECK-LABEL: 
"op_dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { - // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -1268,8 +1357,9 @@ func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32 } // CHECK-LABEL: "op_dynamic_iota" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ + // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_iota"(%arg0) { @@ -1279,22 +1369,25 @@ func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { } // CHECK-LABEL: "op_dynamic_pad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : 
(tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dynamic_reshape" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dynamic_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { - // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> %0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { @@ -1304,15 +1397,17 @@ func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor } // CHECK-LABEL: "op_dynamic_update_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> + // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, 
%arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> func.return %0 : tensor<16xf32> } // CHECK-LABEL: "op_einsum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.einsum"(%arg0, %arg1) { @@ -1322,22 +1417,25 @@ func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor } // CHECK-LABEL: "op_exponential_minus_one" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { - // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_exponential" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_exponential(%arg0: tensor) -> tensor { - // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_fft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - // CHECK: "vhlo.fft_v1"(%arg0) <{ + // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: fft_type = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> @@ -1349,8 +1447,9 @@ func.func @op_fft(%arg0: tensor<16xcomplex>) -> 
tensor<16xcomplex> { } // CHECK-LABEL: "op_floor" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_floor(%arg0: tensor) -> tensor { - // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor func.return %0 : tensor } @@ -1363,16 +1462,17 @@ func.func private @op_func(%arg0: tensor {stablehlo.arg = "0"}) -> (tensor< // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : () -> () func.return %arg0 : tensor } // CHECK-LABEL: "op_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { - // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -1394,8 +1494,9 @@ func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> te } // CHECK-LABEL: "op_get_dimension_size" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_get_dimension_size(%arg0: tensor) -> tensor { - // CHECK: "vhlo.get_dimension_size_v1"(%arg0) <{ + // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.get_dimension_size"(%arg0) { @@ -1405,8 +1506,9 @@ func.func @op_get_dimension_size(%arg0: tensor) -> tensor { } // CHECK-LABEL: 
"op_get_tuple_element" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { - // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ + // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.get_tuple_element"(%arg0) { @@ -1416,11 +1518,12 @@ func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tenso } // CHECK-LABEL: "op_if" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.if_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }, { - // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.if"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> () @@ -1431,15 +1534,17 @@ func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> t } // CHECK-LABEL: "op_imag" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_imag(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_infeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.infeed_v1"(%arg0) <{ + // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> // CHECK-SAME: }> : (!vhlo.token_v1) -> 
(!vhlo.tensor_v1, !vhlo.token_v1) @@ -1462,36 +1567,41 @@ func.func @op_iota() -> tensor<16xf32> { } // CHECK-LABEL: "op_is_finite" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_is_finite(%arg0: tensor) -> tensor { - // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_log" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_log(%arg0: tensor) -> tensor { - // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_log_plus_one" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_log_plus_one(%arg0: tensor) -> tensor { - // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_logistic" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_logistic(%arg0: tensor) -> tensor { - // CHECK: "vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_map" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.map_v1"(%arg0) <{ + // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> ({ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): @@ -1509,57 +1619,65 @@ func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_maximum" +// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_minimum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_multiply" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_negate" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_negate(%arg0: tensor) -> tensor { - // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_not" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_not(%arg0: tensor) -> tensor { - // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor 
func.return %0 : tensor } // CHECK-LABEL: "op_optimization_barrier" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_optimization_barrier(%arg0: tensor) -> tensor { - // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_or" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_outfeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.outfeed"(%arg0, %arg1) { @@ -1569,8 +1687,9 @@ func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo } // CHECK-LABEL: "op_pad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> @@ -1584,36 +1703,41 @@ func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { } // CHECK-LABEL: 
"op_popcnt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_popcnt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_power" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_real_dynamic_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_real" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_real(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } 
// CHECK-LABEL: "op_recv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.recv_v1"(%arg0) <{ + // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<3 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -1626,8 +1750,9 @@ func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { } // CHECK-LABEL: "op_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { - // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) + // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -1642,8 +1767,9 @@ func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_reduce_precision" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reduce_precision(%arg0: tensor) -> tensor { - // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -1657,7 +1783,7 @@ func.func @op_reduce_precision(%arg0: tensor) -> tensor { // CHECK_lABEL: "op_reduce_with_promotable_types" func.func @op_reduce_with_promotable_types(%arg0: tensor<4x4xf32>, %arg1 : tensor) -> (tensor<4xf64>) { - // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) + // CHECK: "vhlo.reduce_v1"(%[[ARG0:.*]], %[[ARG1:.*]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : 
(!vhlo.tensor_v1<4x4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f64_v1> @@ -1672,8 +1798,9 @@ func.func @op_reduce_with_promotable_types(%arg0: tensor<4x4xf32>, %arg1 : tenso } // CHECK-LABEL: "op_reduce_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> @@ -1698,7 +1825,7 @@ func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { // CHECK_lABEL: "op_reduce_scatter_with_promotable_types" func.func @op_reduce_scatter_with_promotable_types(%data: tensor<4x16xf32>) -> tensor<4x4xf64> { - // CHECK: "vhlo.reduce_scatter_v1"(%arg0) + // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0:.*]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1<4x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f64_v1> @@ -1715,8 +1842,9 @@ func.func @op_reduce_scatter_with_promotable_types(%data: tensor<4x16xf32>) -> t // CHECK-LABEL: "op_reduce_window" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { - // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, @@ -1745,7 +1873,7 @@ func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> func.func @op_reduce_window_with_promotable_types(%arg0: 
tensor<4x2xf32>, %arg1: tensor<4x2xf32>, %init0: tensor, %init1: tensor) -> (tensor<2x2xf64>, tensor<2x2xf32>) { - // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1, %arg2, %arg3) + // CHECK: "vhlo.reduce_window_v1"(%[[ARG0:.*]], %[[ARG1:.*]], %[[ARG2:.*]], %[[ARG3:.*]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1, %[[ARG3:arg.*]]: !vhlo.tensor_v1, %[[ARG4:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]], %[[VAL2:.*]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1<4x2x!vhlo.f32_v1>, !vhlo.tensor_v1<4x2x!vhlo.f32_v1>, !vhlo.tensor_v1, !vhlo.tensor_v1) -> (!vhlo.tensor_v1<2x2x!vhlo.f64_v1>, !vhlo.tensor_v1<2x2x!vhlo.f32_v1>) @@ -1765,8 +1893,9 @@ func.func @op_reduce_window_with_promotable_types(%arg0: tensor<4x2xf32>, } // CHECK-LABEL: "op_remainder" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } @@ -1786,16 +1915,18 @@ func.func @op_partition_id() -> tensor { } // CHECK-LABEL: "op_reshape" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { - // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> + // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> func.return %0 : tensor<4x4xf32> } // CHECK-LABEL: "op_return" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: 
"vhlo.case_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.case"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> () @@ -1804,8 +1935,9 @@ func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_reverse" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reverse_v1"(%arg0) <{ + // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.reverse"(%arg0) { @@ -1815,8 +1947,9 @@ func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_rng_bit_generator" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { - // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ + // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ // CHECK-SAME: rng_algorithm = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, !vhlo.tensor_v1) %0:2 = "stablehlo.rng_bit_generator"(%arg0) { @@ -1826,8 +1959,9 @@ func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor } // CHECK-LABEL: "op_rng" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: rng_distribution = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { @@ -1837,29 +1971,33 @@ func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: 
tensor<0xindex> } // CHECK-LABEL: "op_round_nearest_afz" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { - // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_round_nearest_even" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_round_nearest_even(%arg0: tensor) -> tensor { - // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_rsqrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_rsqrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { - // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, @@ -1892,7 +2030,7 @@ func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, % func.func @op_scatter_with_promotable_types(%input_tensor: tensor<200x100x300xf32>, %scatter_indices: tensor<10x2xi32>, %updates: tensor<10x300xf32>) -> tensor<200x100x300xf64> 
{ - // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) + // CHECK: "vhlo.scatter_v1"(%[[ARG0:.*]], %[[ARG1:.*]], %[[ARG2:.*]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1<200x100x300x!vhlo.f32_v1>, !vhlo.tensor_v1<10x2x!vhlo.i32_v1>, !vhlo.tensor_v1<10x300x!vhlo.f32_v1>) -> !vhlo.tensor_v1<200x100x300x!vhlo.f64_v1> @@ -1915,8 +2053,9 @@ func.func @op_scatter_with_promotable_types(%input_tensor: tensor<200x100x300xf3 } // CHECK-LABEL: "op_select_and_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { - // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> @@ -1946,8 +2085,9 @@ func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<1 } // CHECK-LABEL: "op_select_and_scatter_with_promotable_types" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_select_and_scatter_with_promotable_types(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf64> { - // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) + // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: %[[VAL:.*]] = "vhlo.add_v1"(%[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 // CHECK: "vhlo.return_v1"(%[[VAL]]) : (!vhlo.tensor_v1) -> () @@ -1969,15 
+2109,17 @@ func.func @op_select_and_scatter_with_promotable_types(%arg0: tensor<10x24x24x64 } // CHECK-LABEL: "op_select" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_send" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -1990,8 +2132,9 @@ func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.to } // CHECK-LABEL: "op_set_dimension_size" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { @@ -2001,43 +2144,49 @@ func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> te } // CHECK-LABEL: "op_shift_left" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, 
!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_shift_right_arithmetic" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_shift_right_logical" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_sign" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sign(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_sine" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sine(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_slice" +// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { - // CHECK: "vhlo.slice_v1"(%arg0) <{ + // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> @@ -2051,8 +2200,9 @@ func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { } // CHECK-LABEL: "op_sort" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.sort_v1"(%arg0) <{ + // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: is_stable = #vhlo.bool_v1 // CHECK-SAME: }> ({ @@ -2072,29 +2222,33 @@ func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_sqrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sqrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_subtract" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_tanh" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_tanh(%arg0: tensor) -> tensor { - // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // 
CHECK-LABEL: "op_torch_index_select" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { - // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> @@ -2106,8 +2260,9 @@ func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) } // CHECK-LABEL: "op_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { - // CHECK: "vhlo.transpose_v1"(%arg0) <{ + // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> %0 = "stablehlo.transpose"(%arg0) { @@ -2117,8 +2272,9 @@ func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { } // CHECK-LABEL: "op_triangular_solve" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: left_side = #vhlo.bool_v1, // CHECK-SAME: lower = #vhlo.bool_v1, // CHECK-SAME: transpose_a = #vhlo, @@ -2134,15 +2290,17 @@ func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32 } // CHECK-LABEL: "op_tuple" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_tuple(%arg0: tensor) -> tuple> { - // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> + // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> %0 = 
"stablehlo.tuple"(%arg0) : (tensor) -> tuple> func.return %0 : tuple> } // CHECK-LABEL: "op_unary_einsum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { - // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ + // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> %0 = "stablehlo.unary_einsum"(%arg0) { @@ -2152,22 +2310,25 @@ func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { } // CHECK-LABEL: "op_uniform_dequantize" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_uniform_quantize" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { - // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> + // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "op_while" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_while(%arg0: tensor) -> tensor { - // CHECK: "vhlo.while_v1"(%arg0) ({ + // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }, { @@ -2185,8 +2346,9 @@ func.func @op_while(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_xor" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: 
"vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } @@ -2194,197 +2356,225 @@ func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { // ============ TYPES ============ // CHECK-LABEL: "type_i1" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i4" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i8" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], 
%[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui4" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui8" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: 
{{.*}}) func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3FN" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E5M2" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : 
(!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3B11FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3B11FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E5M2FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_bf16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f16" +// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_complex_f32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "type_complex_f64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> 
!vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "type_dynamism_ranked" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { - // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_per_tensor_quantization" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_per_tensor_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "type_per_axis_quantization" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_per_axis_quantization(%arg0: tensor<2x!quant.uniform>) -> tensor<2x!quant.uniform> { - // CHECK: "vhlo.add_v1"(%arg0, %arg0) : (!vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>, !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>) -> !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG0]]) : (!vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>, !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>) -> !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1> %0 = stablehlo.add %arg0, %arg0 : tensor<2x!quant.uniform> func.return %0 : tensor<2x!quant.uniform> } // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> // CHECK-LABEL: "type_token_callee" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () + // 
CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () return %arg0 : !stablehlo.token } // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> // CHECK-LABEL: "type_token_caller" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} + // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token return %0 : !stablehlo.token } // CHECK-LABEL: "type_tuple" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_tuple(%arg0: tuple>) -> tuple { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo" diff --git a/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_19_0.mlir b/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_19_0.mlir index 14bfa0b23fa..996d317b2dc 100644 --- a/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_19_0.mlir +++ b/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_19_0.mlir @@ -13,6 +13,7 @@ // ============ ATTRIBUTES ============ // CHECK-LABEL: "attr_comparison_direction_eq" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -22,6 +23,7 @@ func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_ne" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -31,6 +33,7 @@ func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_ge" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: 
{{.*}}) func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -40,6 +43,7 @@ func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_gt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -49,6 +53,7 @@ func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_le" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -58,6 +63,7 @@ func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_lt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -67,6 +73,7 @@ func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_type_notype" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo @@ -76,6 +83,7 @@ func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) - } // CHECK-LABEL: "attr_comparison_type_float" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -86,6 +94,7 @@ func.func 
@attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> } // CHECK-LABEL: "attr_comparison_type_totalorder" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -96,6 +105,7 @@ func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -106,6 +116,7 @@ func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) - } // CHECK-LABEL: "attr_comparison_type_unsigned" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -118,6 +129,7 @@ func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) // ConvDimensionNumbers aka #stablehlo.conv is covered below. 
// CHECK-LABEL: "attr_custom_call_api_version_unspecified" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -128,6 +140,7 @@ func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tenso } // CHECK-LABEL: "attr_custom_call_api_version_original" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -138,6 +151,7 @@ func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -148,6 +162,7 @@ func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> } // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -166,6 +181,7 @@ func.func @attr_dict() attributes {stablehlo.attr = {attr1 = 1 : i32, attr2 = 2 // DotDimensionNumbers aka #stablehlo.dot is covered below. 
// CHECK-LABEL: "attr_fft_type_fft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -176,6 +192,7 @@ func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomple } // CHECK-LABEL: "attr_fft_type_ifft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -186,6 +203,7 @@ func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcompl } // CHECK-LABEL: "attr_fft_type_rfft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -196,6 +214,7 @@ func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { } // CHECK-LABEL: "attr_fft_type_irfft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -208,6 +227,7 @@ func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> // GatherDimensionNumbers aka #stablehlo.gather is covered below. 
// CHECK-LABEL: "attr_precision_config_default" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -216,6 +236,7 @@ func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor< } // CHECK-LABEL: "attr_precision_config_high" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -225,6 +246,7 @@ func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x } // CHECK-LABEL: "attr_precision_config_highest" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -234,6 +256,7 @@ func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor< } // CHECK-LABEL: "attr_rng_algorithm_default" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -243,6 +266,7 @@ func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tenso } // CHECK-LABEL: "attr_rng_algorithm_three_fry" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -252,6 +276,7 @@ func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, ten } // CHECK-LABEL: "attr_rng_algorithm_philox" +// CHECK-NEXT: (%[[ARG0:.*]]: 
{{.*}}) func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -261,6 +286,7 @@ func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor } // CHECK-LABEL: "attr_rng_distribution_uniform" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { // CHECK: rng_distribution = #vhlo @@ -270,6 +296,7 @@ func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, } // CHECK-LABEL: "attr_rng_distribution_normal" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { // CHECK: rng_distribution = #vhlo @@ -281,6 +308,7 @@ func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. 
// CHECK-LABEL: "attr_transpose_no_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -293,6 +321,7 @@ func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<1 } // CHECK-LABEL: "attr_transpose_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -305,6 +334,7 @@ func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x1 } // CHECK-LABEL: "attr_transpose_adjoint" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -319,10 +349,9 @@ func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16x // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. 
// CHECK-LABEL: "attr_type_extensions_bounds" -func.func @attr_type_extensions_bounds( - %arg0: tensor>) - -> tensor> { - // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) +func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { + // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () func.return %arg0 : tensor> } @@ -330,8 +359,9 @@ func.func @attr_type_extensions_bounds( // ============ DEFAULTS ============ // CHECK-LABEL: "default_all_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.all_gather_v1"(%arg0) <{ + // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -345,8 +375,9 @@ func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "default_all_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_reduce(%arg0: tensor) -> tensor { - // CHECK: "vhlo.all_reduce_v1"(%arg0) + // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) // CHECK-SAME: <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -368,8 +399,9 @@ func.func @default_all_reduce(%arg0: tensor) -> tensor { } // CHECK-LABEL: "default_all_to_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { - // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ + // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, @@ -386,8 +418,9 @@ func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { 
} // CHECK-LABEL: "default_cholesky" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { - // CHECK: "vhlo.cholesky_v1"(%arg0) <{ + // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ // CHECK-SAME: lower = #vhlo.bool_v1 // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> @@ -395,8 +428,9 @@ func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { } // CHECK-LABEL: "default_collective_permute" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { - // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ + // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> @@ -407,8 +441,9 @@ func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf3 } // CHECK-LABEL: "default_collective_broadcast" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_collective_broadcast(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { - // CHECK: "vhlo.collective_broadcast_v1"(%arg0) <{ + // CHECK: "vhlo.collective_broadcast_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> @@ -419,8 +454,9 @@ func.func @default_collective_broadcast(%arg0: tensor<16x8xf32>) -> tensor<16x8x } // CHECK-LABEL: "default_compare" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ + 
// CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: compare_type = #vhlo, // CHECK-SAME: comparison_direction = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -431,8 +467,9 @@ func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor } // CHECK-LABEL: "default_composite" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_composite(%arg0: tensor) -> tensor { - // CHECK: "vhlo.composite_v1"(%arg0) <{ + // CHECK: "vhlo.composite_v1"(%[[ARG0]]) <{ // CHECK-SAME: composite_attributes = #vhlo.dict_v1<{}> // CHECK-SAME: decomposition = #vhlo.string_v1<"composite_target"> // CHECK-SAME: name = #vhlo.string_v1<"stablehlo.composite_target"> @@ -446,8 +483,9 @@ func.func @default_composite(%arg0: tensor) -> tensor { } // CHECK-LABEL: "default_convolution" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { - // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -475,8 +513,9 @@ func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x2 } // CHECK-LABEL: "default_custom_call" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_custom_call(%arg0: tensor) -> tensor { - // CHECK: "vhlo.custom_call_v1"(%arg0) <{ + // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ // CHECK-SAME: api_version = #vhlo, // CHECK-SAME: backend_config = #vhlo.string_v1<"">, // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, @@ -493,8 +532,9 @@ func.func @default_custom_call(%arg0: tensor) -> tensor { } // CHECK-LABEL: "default_dot_general" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dot_general(%arg0: 
tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { - // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, @@ -513,8 +553,9 @@ func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf } // CHECK-LABEL: "default_dot" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> @@ -522,8 +563,9 @@ func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tens } // CHECK-LABEL: "default_dynamic_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>> @@ -535,8 +577,9 @@ func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tenso } // CHECK-LABEL: "default_dynamic_conv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_dynamic_conv(%arg0: 
tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<2x2xi32>) -> tensor<1x?x?x16xf32> { - // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -564,8 +607,9 @@ func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x } // CHECK-LABEL: "default_dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { - // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -591,15 +635,16 @@ func.func @default_func(%arg0: tensor) -> tensor { // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : () -> () func.return %arg0 : tensor } // CHECK-LABEL: "dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { - // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // 
CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -620,8 +665,9 @@ func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) } // CHECK-LABEL: "default_infeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.infeed_v1"(%arg0) <{ + // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ // CHECK-SAME: infeed_config = #vhlo.string_v1<"">, // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) @@ -630,8 +676,9 @@ func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.t } // CHECK-LABEL: "default_outfeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token @@ -639,8 +686,9 @@ func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stab } // CHECK-LABEL: "default_recv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.recv_v1"(%arg0) <{ + // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -652,8 +700,9 @@ func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.tok } // CHECK-LABEL: "default_send" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: 
"vhlo.send_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -665,8 +714,9 @@ func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stableh } // CHECK-LABEL: "default_reduce_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> @@ -688,8 +738,9 @@ func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "default_reduce_window" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x16x30x7xf32> { - // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, @@ -711,8 +762,9 @@ func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { - // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, @@ -740,8 +792,9 @@ func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi3 } 
// CHECK-LABEL: "default_select_and_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { - // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> @@ -769,8 +822,9 @@ func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: ten } // CHECK-LABEL: "default_sort" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.sort_v1"(%arg0) <{ + // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> // CHECK-SAME: is_stable = #vhlo.bool_v1 // CHECK-SAME: }> ({ @@ -789,29 +843,33 @@ func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { // ============ OPS ============ // CHECK-LABEL: "op_abs" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_abs(%arg0: tensor) -> tensor { - // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_add" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_after_all" +// CHECK-NEXT: (%[[ARG0:.*]]: 
{{.*}}) func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 + // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token func.return %0 : !stablehlo.token } // CHECK-LABEL: "op_all_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.all_gather_v1"(%arg0) <{ + // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -827,8 +885,9 @@ func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_all_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_reduce(%arg0: tensor) -> tensor { - // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ + // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 @@ -851,7 +910,7 @@ func.func @op_all_reduce(%arg0: tensor) -> tensor { // CHECK-LABEL: "op_all_reduce_with_promotable_types" func.func @op_all_reduce_with_promotable_types(%operand: tensor) -> tensor { - // CHECK: "vhlo.all_reduce_v1"(%arg0) + // CHECK: "vhlo.all_reduce_v1"(%[[ARG0:.*]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -869,8 +928,9 @@ func.func @op_all_reduce_with_promotable_types(%operand: tensor) -> tensor< } // CHECK-LABEL: "op_all_to_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> 
tensor<16x4xf32> { - // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ + // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, @@ -888,22 +948,25 @@ func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { } // CHECK-LABEL: "op_and" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_atan2" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_batch_norm_grad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { - // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ + // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : 
(!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) @@ -915,8 +978,9 @@ func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf } // CHECK-LABEL: "op_batch_norm_inference" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { - // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ + // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> @@ -928,8 +992,9 @@ func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor } // CHECK-LABEL: "op_batch_norm_training" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { - // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // 
CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) @@ -941,15 +1006,17 @@ func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor< } // CHECK-LABEL: "op_bitcast_convert" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_bitcast_convert(%arg0: tensor) -> tensor { - // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ + // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> %0 = "stablehlo.broadcast_in_dim"(%arg0) { @@ -959,8 +1026,9 @@ func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_broadcast" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.broadcast_v1"(%arg0) <{ + // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> %0 = "stablehlo.broadcast"(%arg0) { @@ -970,9 +1038,10 @@ func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_case" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: 
"vhlo.case_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.case"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> () @@ -981,22 +1050,25 @@ func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_cbrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cbrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_ceil" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_ceil(%arg0: tensor) -> tensor { - // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_cholesky" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { - // CHECK: "vhlo.cholesky_v1"(%arg0) <{ + // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ // CHECK-SAME: lower = #vhlo.bool_v1 // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> %0 = "stablehlo.cholesky"(%arg0) { @@ -1006,22 +1078,25 @@ func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { } // CHECK-LABEL: "op_clamp" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, 
!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_count_leading_zeros" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { - // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_collective_permute" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { - // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ + // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> @@ -1033,8 +1108,9 @@ func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { } // CHECK-LABEL: "op_compare" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: compare_type = #vhlo, // CHECK-SAME: comparison_direction = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -1046,15 +1122,17 @@ func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_complex" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { - // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> + // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : 
(!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "op_composite" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_composite(%arg0: tensor) -> tensor { - // CHECK: "vhlo.composite_v1"(%arg0) <{ + // CHECK: "vhlo.composite_v1"(%[[ARG0]]) <{ // CHECK-SAME: composite_attributes = #vhlo.dict_v1<{#vhlo.string_v1<"my_int"> = #vhlo.integer_v1<1 : i64>, #vhlo.string_v1<"my_string"> = #vhlo.string_v1<"foo">}> // CHECK-SAME: decomposition = #vhlo.string_v1<"composite_target"> // CHECK-SAME: name = #vhlo.string_v1<"stablehlo.composite_target"> @@ -1073,8 +1151,9 @@ func.func @op_composite(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_concatenate" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.concatenate"(%arg0, %arg1) { @@ -1084,6 +1163,7 @@ func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor< } // CHECK-LABEL: "op_constant" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_constant(%arg0: tensor) -> tensor { // CHECK: "vhlo.constant_v1"() <{ // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> @@ -1095,15 +1175,17 @@ func.func @op_constant(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_convert" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_convert(%arg0: tensor) -> tensor { - // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: 
"op_convolution" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { - // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -1137,8 +1219,9 @@ func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16 } // CHECK-LABEL: "op_cosine" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cosine(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor func.return %0 : tensor } @@ -1151,8 +1234,9 @@ func.func @op_create_token() -> !stablehlo.token { } // CHECK-LABEL: "op_cross_replica_sum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ + // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cross-replica-sum"(%arg0) { @@ -1162,8 +1246,9 @@ func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_custom_call" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_custom_call(%arg0: tensor) -> tensor { - // CHECK: "vhlo.custom_call_v1"(%arg0) <{ + // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ // CHECK-SAME: api_version = #vhlo, // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, @@ -1194,15 +1279,17 @@ func.func @op_custom_call(%arg0: tensor) -> tensor { } // CHECK-LABEL: 
"op_divide" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dot_general" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { - // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, @@ -1222,8 +1309,9 @@ func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) } // CHECK-LABEL: "op_dot" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.dot"(%arg0, %arg1) { @@ -1233,8 +1321,9 @@ func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x } // CHECK-LABEL: "op_dynamic_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], 
%[[ARG1]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> @@ -1248,8 +1337,9 @@ func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xi } // CHECK-LABEL: "op_dynamic_conv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<2x2xi32>) -> tensor<1x?x?x16xf32> { - // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -1282,8 +1372,9 @@ func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x1 } // CHECK-LABEL: "op_dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { - // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -1303,8 +1394,9 @@ func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32 } // CHECK-LABEL: "op_dynamic_iota" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ + // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> 
: (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_iota"(%arg0) { @@ -1314,22 +1406,25 @@ func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { } // CHECK-LABEL: "op_dynamic_pad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dynamic_reshape" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dynamic_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { - // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ + // CHECK: 
"vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> %0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { @@ -1339,15 +1434,17 @@ func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor } // CHECK-LABEL: "op_dynamic_update_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> + // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> func.return %0 : tensor<16xf32> } // CHECK-LABEL: "op_einsum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.einsum"(%arg0, %arg1) { @@ -1357,22 +1454,25 @@ func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor } // CHECK-LABEL: "op_exponential_minus_one" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { - // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 
+ // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_exponential" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_exponential(%arg0: tensor) -> tensor { - // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_fft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - // CHECK: "vhlo.fft_v1"(%arg0) <{ + // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: fft_type = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> @@ -1384,8 +1484,9 @@ func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { } // CHECK-LABEL: "op_floor" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_floor(%arg0: tensor) -> tensor { - // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor func.return %0 : tensor } @@ -1398,16 +1499,17 @@ func.func private @op_func(%arg0: tensor {stablehlo.arg = "0"}) -> (tensor< // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : () -> () func.return %arg0 : tensor } // CHECK-LABEL: "op_gather" +// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { - // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -1429,8 +1531,9 @@ func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> te } // CHECK-LABEL: "op_get_dimension_size" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_get_dimension_size(%arg0: tensor) -> tensor { - // CHECK: "vhlo.get_dimension_size_v1"(%arg0) <{ + // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.get_dimension_size"(%arg0) { @@ -1440,8 +1543,9 @@ func.func @op_get_dimension_size(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_get_tuple_element" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { - // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ + // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.get_tuple_element"(%arg0) { @@ -1451,11 +1555,12 @@ func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tenso } // CHECK-LABEL: "op_if" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.if_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }, { - // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> 
() + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.if"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> () @@ -1466,15 +1571,17 @@ func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> t } // CHECK-LABEL: "op_imag" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_imag(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_infeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.infeed_v1"(%arg0) <{ + // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) @@ -1497,36 +1604,41 @@ func.func @op_iota() -> tensor<16xf32> { } // CHECK-LABEL: "op_is_finite" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_is_finite(%arg0: tensor) -> tensor { - // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_log" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_log(%arg0: tensor) -> tensor { - // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_log_plus_one" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_log_plus_one(%arg0: tensor) -> tensor { - // CHECK: "vhlo.log_plus_one_v1"(%arg0) : 
(!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_logistic" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_logistic(%arg0: tensor) -> tensor { - // CHECK: "vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_map" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.map_v1"(%arg0) <{ + // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> ({ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): @@ -1544,57 +1656,65 @@ func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_maximum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_minimum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_multiply" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) 
func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_negate" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_negate(%arg0: tensor) -> tensor { - // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_not" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_not(%arg0: tensor) -> tensor { - // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_optimization_barrier" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_optimization_barrier(%arg0: tensor) -> tensor { - // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_or" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_outfeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func 
@op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.outfeed"(%arg0, %arg1) { @@ -1604,8 +1724,9 @@ func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo } // CHECK-LABEL: "op_pad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> @@ -1619,36 +1740,41 @@ func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { } // CHECK-LABEL: "op_popcnt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_popcnt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_power" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_real_dynamic_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: 
tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_real" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_real(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_recv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.recv_v1"(%arg0) <{ + // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<3 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -1661,8 +1787,9 @@ func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { } // CHECK-LABEL: "op_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { - // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) + // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 
@@ -1677,8 +1804,9 @@ func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_reduce_precision" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reduce_precision(%arg0: tensor) -> tensor { - // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -1689,10 +1817,10 @@ func.func @op_reduce_precision(%arg0: tensor) -> tensor { func.return %0 : tensor } -// CHECK_lABEL: "op_reduce_with_promotable_types" +// CHECK-LABEL: "op_reduce_with_promotable_types" func.func @op_reduce_with_promotable_types(%arg0: tensor<4x4xf32>, %arg1 : tensor) -> (tensor<4xf64>) { - // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) + // CHECK: "vhlo.reduce_v1"(%[[ARG0:.*]], %[[ARG1:.*]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1<4x4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f64_v1> @@ -1707,8 +1835,9 @@ func.func @op_reduce_with_promotable_types(%arg0: tensor<4x4xf32>, %arg1 : tenso } // CHECK-LABEL: "op_reduce_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> @@ -1733,7 +1862,7 @@ func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { // CHECK_lABEL: "op_reduce_scatter_with_promotable_types" func.func @op_reduce_scatter_with_promotable_types(%data: tensor<4x16xf32>) -> tensor<4x4xf64> { - // CHECK: 
"vhlo.reduce_scatter_v1"(%arg0) + // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0:.*]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1<4x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f64_v1> @@ -1750,8 +1879,9 @@ func.func @op_reduce_scatter_with_promotable_types(%data: tensor<4x16xf32>) -> t // CHECK-LABEL: "op_reduce_window" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { - // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, @@ -1780,7 +1910,7 @@ func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> func.func @op_reduce_window_with_promotable_types(%arg0: tensor<4x2xf32>, %arg1: tensor<4x2xf32>, %init0: tensor, %init1: tensor) -> (tensor<2x2xf64>, tensor<2x2xf32>) { - // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1, %arg2, %arg3) + // CHECK: "vhlo.reduce_window_v1"(%[[ARG0:.*]], %[[ARG1:.*]], %[[ARG2:.*]], %[[ARG3:.*]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1, %[[ARG3:arg.*]]: !vhlo.tensor_v1, %[[ARG4:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]], %[[VAL2:.*]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1<4x2x!vhlo.f32_v1>, !vhlo.tensor_v1<4x2x!vhlo.f32_v1>, !vhlo.tensor_v1, !vhlo.tensor_v1) -> (!vhlo.tensor_v1<2x2x!vhlo.f64_v1>, !vhlo.tensor_v1<2x2x!vhlo.f32_v1>) @@ -1800,8 +1930,9 @@ func.func @op_reduce_window_with_promotable_types(%arg0: tensor<4x2xf32>, } // CHECK-LABEL: "op_remainder" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, 
%[[ARG1:.*]]: {{.*}}) func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } @@ -1821,16 +1952,18 @@ func.func @op_partition_id() -> tensor { } // CHECK-LABEL: "op_reshape" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { - // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> + // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> func.return %0 : tensor<4x4xf32> } // CHECK-LABEL: "op_return" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.case_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.case"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> () @@ -1839,8 +1972,9 @@ func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_reverse" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reverse_v1"(%arg0) <{ + // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.reverse"(%arg0) { @@ -1850,8 +1984,9 @@ func.func @op_reverse(%arg0: tensor<16xf32>) -> 
tensor<16xf32> { } // CHECK-LABEL: "op_rng_bit_generator" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { - // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ + // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ // CHECK-SAME: rng_algorithm = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, !vhlo.tensor_v1) %0:2 = "stablehlo.rng_bit_generator"(%arg0) { @@ -1861,8 +1996,9 @@ func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor } // CHECK-LABEL: "op_rng" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: rng_distribution = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { @@ -1872,29 +2008,33 @@ func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex> } // CHECK-LABEL: "op_round_nearest_afz" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { - // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_round_nearest_even" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_round_nearest_even(%arg0: tensor) -> tensor { - // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_rsqrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func 
@op_rsqrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { - // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, @@ -1927,7 +2067,7 @@ func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, % func.func @op_scatter_with_promotable_types(%input_tensor: tensor<200x100x300xf32>, %scatter_indices: tensor<10x2xi32>, %updates: tensor<10x300xf32>) -> tensor<200x100x300xf64> { - // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) + // CHECK: "vhlo.scatter_v1"(%[[ARG0:.*]], %[[ARG1:.*]], %[[ARG2:.*]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1<200x100x300x!vhlo.f32_v1>, !vhlo.tensor_v1<10x2x!vhlo.i32_v1>, !vhlo.tensor_v1<10x300x!vhlo.f32_v1>) -> !vhlo.tensor_v1<200x100x300x!vhlo.f64_v1> @@ -1950,8 +2090,9 @@ func.func @op_scatter_with_promotable_types(%input_tensor: tensor<200x100x300xf3 } // CHECK-LABEL: "op_select_and_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { - // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ + // 
CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> @@ -1981,8 +2122,9 @@ func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<1 } // CHECK-LABEL: "op_select_and_scatter_with_promotable_types" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_select_and_scatter_with_promotable_types(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf64> { - // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) + // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: %[[VAL:.*]] = "vhlo.add_v1"(%[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 // CHECK: "vhlo.return_v1"(%[[VAL]]) : (!vhlo.tensor_v1) -> () @@ -2004,15 +2146,17 @@ func.func @op_select_and_scatter_with_promotable_types(%arg0: tensor<10x24x24x64 } // CHECK-LABEL: "op_select" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_send" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.send_v1"(%[[ARG0]], 
%[[ARG1]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -2025,8 +2169,9 @@ func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.to } // CHECK-LABEL: "op_set_dimension_size" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { @@ -2036,43 +2181,49 @@ func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> te } // CHECK-LABEL: "op_shift_left" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_shift_right_arithmetic" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_shift_right_logical" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func 
@op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_sign" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sign(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_sine" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sine(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { - // CHECK: "vhlo.slice_v1"(%arg0) <{ + // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> @@ -2086,8 +2237,9 @@ func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { } // CHECK-LABEL: "op_sort" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.sort_v1"(%arg0) <{ + // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: is_stable = #vhlo.bool_v1 // CHECK-SAME: }> ({ @@ -2107,29 +2259,33 @@ func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_sqrt" +// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sqrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_subtract" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_tanh" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_tanh(%arg0: tensor) -> tensor { - // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_torch_index_select" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { - // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> @@ -2141,8 +2297,9 @@ func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) } // CHECK-LABEL: "op_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { - // CHECK: "vhlo.transpose_v1"(%arg0) <{ + // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ 
// CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> %0 = "stablehlo.transpose"(%arg0) { @@ -2152,8 +2309,9 @@ func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { } // CHECK-LABEL: "op_triangular_solve" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: left_side = #vhlo.bool_v1, // CHECK-SAME: lower = #vhlo.bool_v1, // CHECK-SAME: transpose_a = #vhlo, @@ -2169,15 +2327,17 @@ func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32 } // CHECK-LABEL: "op_tuple" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_tuple(%arg0: tensor) -> tuple> { - // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> + // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> func.return %0 : tuple> } // CHECK-LABEL: "op_unary_einsum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { - // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ + // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> %0 = "stablehlo.unary_einsum"(%arg0) { @@ -2187,22 +2347,25 @@ func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { } // CHECK-LABEL: "op_uniform_dequantize" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : 
(!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_uniform_quantize" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { - // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> + // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "op_while" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_while(%arg0: tensor) -> tensor { - // CHECK: "vhlo.while_v1"(%arg0) ({ + // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }, { @@ -2220,8 +2383,9 @@ func.func @op_while(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_xor" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } @@ -2229,197 +2393,225 @@ func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { // ============ TYPES ============ // CHECK-LABEL: "type_i1" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i4" +// CHECK-NEXT: (%[[ARG0:.*]]: 
{{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i8" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : 
(!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui4" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui8" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) 
func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3FN" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E5M2" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3B11FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3B11FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: 
"vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E5M2FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_bf16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f64" +// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_complex_f32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "type_complex_f64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "type_dynamism_ranked" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { - // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_per_tensor_quantization" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_per_tensor_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> 
+ // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "type_per_axis_quantization" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_per_axis_quantization(%arg0: tensor<2x!quant.uniform>) -> tensor<2x!quant.uniform> { - // CHECK: "vhlo.add_v1"(%arg0, %arg0) : (!vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>, !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>) -> !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG0]]) : (!vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>, !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>) -> !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1> %0 = stablehlo.add %arg0, %arg0 : tensor<2x!quant.uniform> func.return %0 : tensor<2x!quant.uniform> } // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> // CHECK-LABEL: "type_token_callee" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () + // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () return %arg0 : !stablehlo.token } // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> // CHECK-LABEL: "type_token_caller" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} + // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token return %0 : !stablehlo.token } // CHECK-LABEL: "type_tuple" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_tuple(%arg0: tuple>) -> tuple { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo" diff --git 
a/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_20_0.mlir b/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_20_0.mlir index 684f7cbf9b2..605cdb5b536 100644 --- a/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_20_0.mlir +++ b/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_20_0.mlir @@ -319,19 +319,20 @@ func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16x // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. // CHECK-LABEL: "attr_type_extensions_bounds" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_type_extensions_bounds( - %arg0: tensor>) - -> tensor> { - // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () + %arg0: tensor>) + -> tensor> { + // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () func.return %arg0 : tensor> } - // ============ DEFAULTS ============ // CHECK-LABEL: "default_all_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.all_gather_v1"(%arg0) <{ + // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -345,8 +346,9 @@ func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "default_all_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_reduce(%arg0: tensor) -> tensor { - // CHECK: "vhlo.all_reduce_v1"(%arg0) + // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) // CHECK-SAME: <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -368,8 +370,9 @@ func.func @default_all_reduce(%arg0: tensor) -> tensor { } // CHECK-LABEL: "default_all_to_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { - // 
CHECK: "vhlo.all_to_all_v1"(%arg0) <{ + // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, @@ -386,8 +389,9 @@ func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { } // CHECK-LABEL: "default_cholesky" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { - // CHECK: "vhlo.cholesky_v1"(%arg0) <{ + // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ // CHECK-SAME: lower = #vhlo.bool_v1 // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> @@ -395,8 +399,9 @@ func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { } // CHECK-LABEL: "default_collective_permute" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { - // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ + // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> @@ -407,8 +412,9 @@ func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf3 } // CHECK-LABEL: "default_collective_broadcast" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_collective_broadcast(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { - // CHECK: "vhlo.collective_broadcast_v1"(%arg0) <{ + // CHECK: "vhlo.collective_broadcast_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x2xi64>> // CHECK-SAME: }> : 
(!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> @@ -419,8 +425,9 @@ func.func @default_collective_broadcast(%arg0: tensor<16x8xf32>) -> tensor<16x8x } // CHECK-LABEL: "default_compare" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: compare_type = #vhlo, // CHECK-SAME: comparison_direction = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -431,8 +438,9 @@ func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor } // CHECK-LABEL: "default_composite" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_composite(%arg0: tensor) -> tensor { - // CHECK: "vhlo.composite_v1"(%arg0) <{ + // CHECK: "vhlo.composite_v1"(%[[ARG0]]) <{ // CHECK-SAME: composite_attributes = #vhlo.dict_v1<{}> // CHECK-SAME: decomposition = #vhlo.string_v1<"composite_target"> // CHECK-SAME: name = #vhlo.string_v1<"stablehlo.composite_target"> @@ -446,8 +454,9 @@ func.func @default_composite(%arg0: tensor) -> tensor { } // CHECK-LABEL: "default_convolution" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { - // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -475,8 +484,9 @@ func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x2 } // CHECK-LABEL: "default_custom_call" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_custom_call(%arg0: tensor) -> tensor { - // CHECK: "vhlo.custom_call_v1"(%arg0) <{ + // CHECK: 
"vhlo.custom_call_v1"(%[[ARG0]]) <{ // CHECK-SAME: api_version = #vhlo, // CHECK-SAME: backend_config = #vhlo.string_v1<"">, // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, @@ -493,8 +503,9 @@ func.func @default_custom_call(%arg0: tensor) -> tensor { } // CHECK-LABEL: "default_dot_general" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { - // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, @@ -513,8 +524,9 @@ func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf } // CHECK-LABEL: "default_dot" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> @@ -522,8 +534,9 @@ func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tens } // CHECK-LABEL: "default_dynamic_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, // 
CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>> @@ -535,8 +548,9 @@ func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tenso } // CHECK-LABEL: "default_dynamic_conv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<2x2xi32>) -> tensor<1x?x?x16xf32> { - // CHECK: "vhlo.dynamic_conv_v2"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_conv_v2"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -563,8 +577,9 @@ func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x } // CHECK-LABEL: "default_dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { - // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -590,15 +605,16 @@ func.func @default_func(%arg0: tensor) -> tensor { // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : () 
-> () func.return %arg0 : tensor } // CHECK-LABEL: "dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { - // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -619,8 +635,9 @@ func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) } // CHECK-LABEL: "default_infeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.infeed_v1"(%arg0) <{ + // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ // CHECK-SAME: infeed_config = #vhlo.string_v1<"">, // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) @@ -629,8 +646,9 @@ func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.t } // CHECK-LABEL: "default_outfeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token @@ -638,8 +656,9 @@ func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stab } // CHECK-LABEL: "default_recv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.recv_v1"(%arg0) <{ + // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = 
#vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -651,8 +670,9 @@ func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.tok } // CHECK-LABEL: "default_send" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -664,8 +684,9 @@ func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stableh } // CHECK-LABEL: "default_reduce_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> @@ -687,8 +708,9 @@ func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "default_reduce_window" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x16x30x7xf32> { - // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, @@ -710,8 +732,9 @@ func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> 
tensor<200x100x300xf32> { - // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, @@ -739,8 +762,9 @@ func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi3 } // CHECK-LABEL: "default_select_and_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { - // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> @@ -768,8 +792,9 @@ func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: ten } // CHECK-LABEL: "default_sort" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.sort_v1"(%arg0) <{ + // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> // CHECK-SAME: is_stable = #vhlo.bool_v1 // CHECK-SAME: }> ({ @@ -788,29 +813,33 @@ func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { // ============ OPS ============ // CHECK-LABEL: "op_abs" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_abs(%arg0: tensor) -> tensor { - // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_add" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, 
%[[ARG1:.*]]: {{.*}}) func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_after_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 + // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token func.return %0 : !stablehlo.token } // CHECK-LABEL: "op_all_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.all_gather_v1"(%arg0) <{ + // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -826,8 +855,9 @@ func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_all_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_reduce(%arg0: tensor) -> tensor { - // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ + // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 @@ -849,8 +879,9 @@ func.func @op_all_reduce(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_all_reduce_with_promotable_types" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_reduce_with_promotable_types(%operand: tensor) -> tensor { - // CHECK: "vhlo.all_reduce_v1"(%arg0) + // CHECK: 
"vhlo.all_reduce_v1"(%[[ARG0]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -868,8 +899,9 @@ func.func @op_all_reduce_with_promotable_types(%operand: tensor) -> tensor< } // CHECK-LABEL: "op_all_to_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { - // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ + // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, @@ -887,22 +919,25 @@ func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { } // CHECK-LABEL: "op_and" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_atan2" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_batch_norm_grad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, 
%arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { - // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ + // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) @@ -914,8 +949,9 @@ func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf } // CHECK-LABEL: "op_batch_norm_inference" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { - // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ + // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> @@ -927,8 +963,9 @@ func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor } // CHECK-LABEL: "op_batch_norm_training" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: 
{{.*}}) func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { - // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) @@ -940,15 +977,17 @@ func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor< } // CHECK-LABEL: "op_bitcast_convert" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_bitcast_convert(%arg0: tensor) -> tensor { - // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ + // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> %0 = "stablehlo.broadcast_in_dim"(%arg0) { @@ -958,8 +997,9 @@ func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_broadcast" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.broadcast_v1"(%arg0) <{ + // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ // 
CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> %0 = "stablehlo.broadcast"(%arg0) { @@ -969,9 +1009,10 @@ func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_case" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.case_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.case"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> () @@ -980,22 +1021,25 @@ func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_cbrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cbrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_ceil" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_ceil(%arg0: tensor) -> tensor { - // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_cholesky" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { - // CHECK: "vhlo.cholesky_v1"(%arg0) <{ + // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ // CHECK-SAME: lower = #vhlo.bool_v1 // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> %0 = "stablehlo.cholesky"(%arg0) { @@ -1005,22 +1049,25 @@ func.func 
@op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { } // CHECK-LABEL: "op_clamp" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_count_leading_zeros" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { - // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_collective_permute" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { - // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ + // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> @@ -1032,8 +1079,9 @@ func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { } // CHECK-LABEL: "op_compare" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: compare_type = #vhlo, // CHECK-SAME: comparison_direction = #vhlo // CHECK-SAME: }> : 
(!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -1045,15 +1093,17 @@ func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_complex" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { - // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> + // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "op_composite" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_composite(%arg0: tensor) -> tensor { - // CHECK: "vhlo.composite_v1"(%arg0) <{ + // CHECK: "vhlo.composite_v1"(%[[ARG0]]) <{ // CHECK-SAME: composite_attributes = #vhlo.dict_v1<{#vhlo.string_v1<"my_int"> = #vhlo.integer_v1<1 : i64>, #vhlo.string_v1<"my_string"> = #vhlo.string_v1<"foo">}> // CHECK-SAME: decomposition = #vhlo.string_v1<"composite_target"> // CHECK-SAME: name = #vhlo.string_v1<"stablehlo.composite_target"> @@ -1072,8 +1122,9 @@ func.func @op_composite(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_concatenate" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.concatenate"(%arg0, %arg1) { @@ -1083,6 +1134,7 @@ func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor< } // CHECK-LABEL: "op_constant" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_constant(%arg0: tensor) -> tensor { // CHECK: "vhlo.constant_v1"() <{ // CHECK-SAME: value = #vhlo.tensor_v1 
: tensor> @@ -1094,15 +1146,17 @@ func.func @op_constant(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_convert" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_convert(%arg0: tensor) -> tensor { - // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_convolution" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { - // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -1136,8 +1190,9 @@ func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16 } // CHECK-LABEL: "op_cosine" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cosine(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor func.return %0 : tensor } @@ -1150,8 +1205,9 @@ func.func @op_create_token() -> !stablehlo.token { } // CHECK-LABEL: "op_cross_replica_sum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ + // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cross-replica-sum"(%arg0) { @@ -1161,8 +1217,9 @@ func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { } // CHECK-LABEL: 
"op_custom_call" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_custom_call(%arg0: tensor) -> tensor { - // CHECK: "vhlo.custom_call_v1"(%arg0) <{ + // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ // CHECK-SAME: api_version = #vhlo, // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, @@ -1193,15 +1250,17 @@ func.func @op_custom_call(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_divide" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dot_general" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { - // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, @@ -1221,8 +1280,9 @@ func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) } // CHECK-LABEL: "op_dot" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = 
"stablehlo.dot"(%arg0, %arg1) { @@ -1232,8 +1292,9 @@ func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x } // CHECK-LABEL: "op_dynamic_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> @@ -1247,8 +1308,9 @@ func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xi } // CHECK-LABEL: "op_dynamic_conv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<2x2xi32>) -> tensor<1x?x?x16xf32> { - // CHECK: "vhlo.dynamic_conv_v2"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_conv_v2"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -1280,8 +1342,9 @@ func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x1 } // CHECK-LABEL: "op_dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { - // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // 
CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -1301,8 +1364,9 @@ func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32 } // CHECK-LABEL: "op_dynamic_iota" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ + // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_iota"(%arg0) { @@ -1312,22 +1376,25 @@ func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { } // CHECK-LABEL: "op_dynamic_pad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dynamic_reshape" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : 
(!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dynamic_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { - // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> %0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { @@ -1337,15 +1404,17 @@ func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor } // CHECK-LABEL: "op_dynamic_update_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> + // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> func.return %0 : tensor<16xf32> } // CHECK-LABEL: "op_einsum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, 
!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.einsum"(%arg0, %arg1) { @@ -1355,22 +1424,25 @@ func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor } // CHECK-LABEL: "op_exponential_minus_one" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { - // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_exponential" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_exponential(%arg0: tensor) -> tensor { - // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_fft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - // CHECK: "vhlo.fft_v1"(%arg0) <{ + // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: fft_type = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> @@ -1382,8 +1454,9 @@ func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { } // CHECK-LABEL: "op_floor" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_floor(%arg0: tensor) -> tensor { - // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor func.return %0 : tensor } @@ -1396,16 +1469,17 @@ func.func private @op_func(%arg0: tensor {stablehlo.arg = "0"}) -> (tensor< // CHECK-SAME: sym_name = 
#vhlo.string_v1<"op_func">, // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : () -> () func.return %arg0 : tensor } // CHECK-LABEL: "op_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { - // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -1427,8 +1501,9 @@ func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> te } // CHECK-LABEL: "op_get_dimension_size" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_get_dimension_size(%arg0: tensor) -> tensor { - // CHECK: "vhlo.get_dimension_size_v1"(%arg0) <{ + // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.get_dimension_size"(%arg0) { @@ -1438,8 +1513,9 @@ func.func @op_get_dimension_size(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_get_tuple_element" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { - // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ + // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.get_tuple_element"(%arg0) { @@ -1449,11 +1525,12 @@ func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tenso } // 
CHECK-LABEL: "op_if" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.if_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }, { - // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.if"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> () @@ -1464,15 +1541,17 @@ func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> t } // CHECK-LABEL: "op_imag" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_imag(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_infeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.infeed_v1"(%arg0) <{ + // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) @@ -1495,36 +1574,41 @@ func.func @op_iota() -> tensor<16xf32> { } // CHECK-LABEL: "op_is_finite" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_is_finite(%arg0: tensor) -> tensor { - // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_log" +// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_log(%arg0: tensor) -> tensor { - // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_log_plus_one" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_log_plus_one(%arg0: tensor) -> tensor { - // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_logistic" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_logistic(%arg0: tensor) -> tensor { - // CHECK: "vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_map" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.map_v1"(%arg0) <{ + // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> ({ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): @@ -1542,57 +1626,65 @@ func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_maximum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_minimum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, 
%[[ARG1:.*]]: {{.*}}) func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_multiply" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_negate" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_negate(%arg0: tensor) -> tensor { - // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_not" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_not(%arg0: tensor) -> tensor { - // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_optimization_barrier" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_optimization_barrier(%arg0: tensor) -> tensor { - // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_or" +// CHECK-NEXT: (%[[ARG0:.*]]: 
{{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_outfeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.outfeed"(%arg0, %arg1) { @@ -1602,8 +1694,9 @@ func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo } // CHECK-LABEL: "op_pad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> @@ -1617,36 +1710,41 @@ func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { } // CHECK-LABEL: "op_popcnt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_popcnt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_power" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: 
"vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_real_dynamic_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_real" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_real(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_recv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.recv_v1"(%arg0) <{ + // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<3 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -1659,8 +1757,9 @@ func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { } // 
CHECK-LABEL: "op_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { - // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) + // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -1675,8 +1774,9 @@ func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_reduce_precision" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reduce_precision(%arg0: tensor) -> tensor { - // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -1690,7 +1790,7 @@ func.func @op_reduce_precision(%arg0: tensor) -> tensor { // CHECK_lABEL: "op_reduce_with_promotable_types" func.func @op_reduce_with_promotable_types(%arg0: tensor<4x4xf32>, %arg1 : tensor) -> (tensor<4xf64>) { - // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) + // CHECK: "vhlo.reduce_v1"(%[[ARG0:.*]], %[[ARG1:.*]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1<4x4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f64_v1> @@ -1705,8 +1805,9 @@ func.func @op_reduce_with_promotable_types(%arg0: tensor<4x4xf32>, %arg1 : tenso } // CHECK-LABEL: "op_reduce_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : 
i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> @@ -1731,7 +1832,7 @@ func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { // CHECK_lABEL: "op_reduce_scatter_with_promotable_types" func.func @op_reduce_scatter_with_promotable_types(%data: tensor<4x16xf32>) -> tensor<4x4xf64> { - // CHECK: "vhlo.reduce_scatter_v1"(%arg0) + // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0:.*]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1<4x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f64_v1> @@ -1749,7 +1850,7 @@ func.func @op_reduce_scatter_with_promotable_types(%data: tensor<4x16xf32>) -> t // CHECK-LABEL: "op_reduce_window" func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { - // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.reduce_window_v1"(%[[ARG0:.*]], %[[ARG1:.*]]) <{ // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, @@ -1778,7 +1879,7 @@ func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> func.func @op_reduce_window_with_promotable_types(%arg0: tensor<4x2xf32>, %arg1: tensor<4x2xf32>, %init0: tensor, %init1: tensor) -> (tensor<2x2xf64>, tensor<2x2xf32>) { - // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1, %arg2, %arg3) + // CHECK: "vhlo.reduce_window_v1"(%[[ARG0:.*]], %[[ARG1:.*]], %[[ARG2:.*]], %[[ARG3:.*]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1, %[[ARG3:arg.*]]: !vhlo.tensor_v1, %[[ARG4:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]], %[[VAL2:.*]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> () // CHECK: }) : 
(!vhlo.tensor_v1<4x2x!vhlo.f32_v1>, !vhlo.tensor_v1<4x2x!vhlo.f32_v1>, !vhlo.tensor_v1, !vhlo.tensor_v1) -> (!vhlo.tensor_v1<2x2x!vhlo.f64_v1>, !vhlo.tensor_v1<2x2x!vhlo.f32_v1>) @@ -1798,8 +1899,9 @@ func.func @op_reduce_window_with_promotable_types(%arg0: tensor<4x2xf32>, } // CHECK-LABEL: "op_remainder" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } @@ -1819,16 +1921,18 @@ func.func @op_partition_id() -> tensor { } // CHECK-LABEL: "op_reshape" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { - // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> + // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> func.return %0 : tensor<4x4xf32> } // CHECK-LABEL: "op_return" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.case_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.case"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> () @@ -1837,8 +1941,9 @@ func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_reverse" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // 
CHECK: "vhlo.reverse_v1"(%arg0) <{ + // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.reverse"(%arg0) { @@ -1848,8 +1953,9 @@ func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_rng_bit_generator" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { - // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ + // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ // CHECK-SAME: rng_algorithm = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, !vhlo.tensor_v1) %0:2 = "stablehlo.rng_bit_generator"(%arg0) { @@ -1859,8 +1965,9 @@ func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor } // CHECK-LABEL: "op_rng" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: rng_distribution = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { @@ -1870,29 +1977,33 @@ func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex> } // CHECK-LABEL: "op_round_nearest_afz" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { - // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_round_nearest_even" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_round_nearest_even(%arg0: tensor) -> tensor { - // 
CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_rsqrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_rsqrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { - // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, @@ -1925,7 +2036,7 @@ func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, % func.func @op_scatter_with_promotable_types(%input_tensor: tensor<200x100x300xf32>, %scatter_indices: tensor<10x2xi32>, %updates: tensor<10x300xf32>) -> tensor<200x100x300xf64> { - // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) + // CHECK: "vhlo.scatter_v1"(%[[ARG0:.*]], %[[ARG1:.*]], %[[ARG2:.*]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1<200x100x300x!vhlo.f32_v1>, !vhlo.tensor_v1<10x2x!vhlo.i32_v1>, !vhlo.tensor_v1<10x300x!vhlo.f32_v1>) -> !vhlo.tensor_v1<200x100x300x!vhlo.f64_v1> @@ -1948,8 +2059,9 @@ func.func @op_scatter_with_promotable_types(%input_tensor: 
tensor<200x100x300xf3 } // CHECK-LABEL: "op_select_and_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { - // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> @@ -1979,8 +2091,9 @@ func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<1 } // CHECK-LABEL: "op_select_and_scatter_with_promotable_types" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_select_and_scatter_with_promotable_types(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf64> { - // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) + // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: %[[VAL:.*]] = "vhlo.add_v1"(%[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 // CHECK: "vhlo.return_v1"(%[[VAL]]) : (!vhlo.tensor_v1) -> () @@ -2002,15 +2115,17 @@ func.func @op_select_and_scatter_with_promotable_types(%arg0: tensor<10x24x24x64 } // CHECK-LABEL: "op_select" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 
%0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_send" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -2023,8 +2138,9 @@ func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.to } // CHECK-LABEL: "op_set_dimension_size" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { @@ -2034,43 +2150,49 @@ func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> te } // CHECK-LABEL: "op_shift_left" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_shift_right_arithmetic" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> 
!vhlo.tensor_v1 + // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_shift_right_logical" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_sign" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sign(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_sine" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sine(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { - // CHECK: "vhlo.slice_v1"(%arg0) <{ + // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> @@ -2084,8 +2206,9 @@ func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { } // CHECK-LABEL: "op_sort" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func 
@op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.sort_v1"(%arg0) <{ + // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: is_stable = #vhlo.bool_v1 // CHECK-SAME: }> ({ @@ -2105,29 +2228,33 @@ func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_sqrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sqrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_subtract" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_tanh" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_tanh(%arg0: tensor) -> tensor { - // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_torch_index_select" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { - // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> 
!vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> @@ -2139,8 +2266,9 @@ func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) } // CHECK-LABEL: "op_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { - // CHECK: "vhlo.transpose_v1"(%arg0) <{ + // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> %0 = "stablehlo.transpose"(%arg0) { @@ -2150,8 +2278,9 @@ func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { } // CHECK-LABEL: "op_triangular_solve" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: left_side = #vhlo.bool_v1, // CHECK-SAME: lower = #vhlo.bool_v1, // CHECK-SAME: transpose_a = #vhlo, @@ -2167,15 +2296,17 @@ func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32 } // CHECK-LABEL: "op_tuple" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_tuple(%arg0: tensor) -> tuple> { - // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> + // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> func.return %0 : tuple> } // CHECK-LABEL: "op_unary_einsum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { - // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ + // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> %0 = "stablehlo.unary_einsum"(%arg0) { @@ -2185,22 +2316,25 @@ 
func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { } // CHECK-LABEL: "op_uniform_dequantize" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_uniform_quantize" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { - // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> + // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "op_while" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_while(%arg0: tensor) -> tensor { - // CHECK: "vhlo.while_v1"(%arg0) ({ + // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }, { @@ -2218,8 +2352,9 @@ func.func @op_while(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_xor" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } @@ -2227,197 +2362,225 @@ func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { // ============ TYPES ============ // CHECK-LABEL: "type_i1" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i1(%arg0: tensor, %arg1: tensor) -> 
tensor { - // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i4" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i8" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) 
: (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui4" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui8" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: 
"vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3FN" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E5M2" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = 
"stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3B11FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3B11FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E5M2FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_bf16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f32(%arg0: 
tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_complex_f32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "type_complex_f64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "type_dynamism_ranked" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { - // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = 
"stablehlo.abs"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_per_tensor_quantization" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_per_tensor_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "type_per_axis_quantization" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_per_axis_quantization(%arg0: tensor<2x!quant.uniform>) -> tensor<2x!quant.uniform> { - // CHECK: "vhlo.add_v1"(%arg0, %arg0) : (!vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>, !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>) -> !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG0]]) : (!vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>, !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>) -> !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1> %0 = stablehlo.add %arg0, %arg0 : tensor<2x!quant.uniform> func.return %0 : tensor<2x!quant.uniform> } // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> // CHECK-LABEL: "type_token_callee" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () + // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () return %arg0 : !stablehlo.token } // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> // CHECK-LABEL: "type_token_caller" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} + // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} // CHECK-SAME: (!vhlo.token_v1) -> 
!vhlo.token_v1 %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token return %0 : !stablehlo.token } // CHECK-LABEL: "type_tuple" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_tuple(%arg0: tuple>) -> tuple { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo" diff --git a/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_9_0.mlir b/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_9_0.mlir index 795189af8f4..fb3059c7cc4 100644 --- a/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_9_0.mlir +++ b/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.0_9_0.mlir @@ -13,6 +13,7 @@ // ============ ATTRIBUTES ============ // CHECK-LABEL: "attr_comparison_direction_eq" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -22,6 +23,7 @@ func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_ne" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -31,6 +33,7 @@ func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_ge" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -40,6 +43,7 @@ func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_gt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: 
comparison_direction = #vhlo @@ -49,6 +53,7 @@ func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_le" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -58,6 +63,7 @@ func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_lt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -67,6 +73,7 @@ func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_type_notype" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo @@ -76,6 +83,7 @@ func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) - } // CHECK-LABEL: "attr_comparison_type_float" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -86,6 +94,7 @@ func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> } // CHECK-LABEL: "attr_comparison_type_totalorder" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -96,6 +105,7 @@ func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction 
= #stablehlo, @@ -106,6 +116,7 @@ func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) - } // CHECK-LABEL: "attr_comparison_type_unsigned" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -118,6 +129,7 @@ func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) // ConvDimensionNumbers aka #stablehlo.conv is covered below. // CHECK-LABEL: "attr_custom_call_api_version_unspecified" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -128,6 +140,7 @@ func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tenso } // CHECK-LABEL: "attr_custom_call_api_version_original" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -138,6 +151,7 @@ func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -148,6 +162,7 @@ func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> } // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -160,6 +175,7 @@ func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor>) -> tensor<16xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -170,6 +186,7 @@ func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomple } // CHECK-LABEL: "attr_fft_type_ifft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) 
func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -180,6 +197,7 @@ func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcompl } // CHECK-LABEL: "attr_fft_type_rfft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -190,6 +208,7 @@ func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { } // CHECK-LABEL: "attr_fft_type_irfft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -202,6 +221,7 @@ func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> // GatherDimensionNumbers aka #stablehlo.gather is covered below. // CHECK-LABEL: "attr_precision_config_default" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -210,6 +230,7 @@ func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor< } // CHECK-LABEL: "attr_precision_config_high" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -219,6 +240,7 @@ func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x } // CHECK-LABEL: "attr_precision_config_highest" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, 
%arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -228,6 +250,7 @@ func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor< } // CHECK-LABEL: "attr_rng_algorithm_default" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -237,6 +260,7 @@ func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tenso } // CHECK-LABEL: "attr_rng_algorithm_three_fry" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -246,6 +270,7 @@ func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, ten } // CHECK-LABEL: "attr_rng_algorithm_philox" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -255,6 +280,7 @@ func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor } // CHECK-LABEL: "attr_rng_distribution_uniform" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { // CHECK: rng_distribution = #vhlo @@ -264,6 +290,7 @@ func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, } // CHECK-LABEL: "attr_rng_distribution_normal" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { // CHECK: rng_distribution = #vhlo @@ -275,6 +302,7 @@ func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, // 
ScatterDimensionNumbers aka #stablehlo.scatter is covered below. // CHECK-LABEL: "attr_transpose_no_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -287,6 +315,7 @@ func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<1 } // CHECK-LABEL: "attr_transpose_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -299,6 +328,7 @@ func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x1 } // CHECK-LABEL: "attr_transpose_adjoint" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -313,10 +343,9 @@ func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16x // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. 
// CHECK-LABEL: "attr_type_extensions_bounds" -func.func @attr_type_extensions_bounds( - %arg0: tensor>) - -> tensor> { - // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) +func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { + // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () func.return %arg0 : tensor> } @@ -324,8 +353,9 @@ func.func @attr_type_extensions_bounds( // ============ DEFAULTS ============ // CHECK-LABEL: "default_all_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.all_gather_v1"(%arg0) <{ + // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -339,8 +369,9 @@ func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "default_all_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_reduce(%arg0: tensor) -> tensor { - // CHECK: "vhlo.all_reduce_v1"(%arg0) + // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) // CHECK-SAME: <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -362,8 +393,9 @@ func.func @default_all_reduce(%arg0: tensor) -> tensor { } // CHECK-LABEL: "default_all_to_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { - // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ + // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, @@ -380,8 +412,9 @@ func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { 
} // CHECK-LABEL: "default_cholesky" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { - // CHECK: "vhlo.cholesky_v1"(%arg0) <{ + // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ // CHECK-SAME: lower = #vhlo.bool_v1 // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> @@ -389,8 +422,9 @@ func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { } // CHECK-LABEL: "default_collective_permute" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { - // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ + // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> @@ -401,8 +435,9 @@ func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf3 } // CHECK-LABEL: "default_compare" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: compare_type = #vhlo, // CHECK-SAME: comparison_direction = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -413,8 +448,9 @@ func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor } // CHECK-LABEL: "default_convolution" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { - // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.convolution_v1"(%[[ARG0]], 
%[[ARG1]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -442,8 +478,9 @@ func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x2 } // CHECK-LABEL: "default_custom_call" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_custom_call(%arg0: tensor) -> tensor { - // CHECK: "vhlo.custom_call_v1"(%arg0) <{ + // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ // CHECK-SAME: api_version = #vhlo, // CHECK-SAME: backend_config = #vhlo.string_v1<"">, // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, @@ -460,8 +497,9 @@ func.func @default_custom_call(%arg0: tensor) -> tensor { } // CHECK-LABEL: "default_dot_general" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { - // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, @@ -480,8 +518,9 @@ func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf } // CHECK-LABEL: "default_dot" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> @@ -489,8 +528,9 @@ func.func 
@default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tens } // CHECK-LABEL: "default_dynamic_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>> @@ -502,8 +542,9 @@ func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tenso } // CHECK-LABEL: "default_dynamic_conv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<2x2xi32>) -> tensor<1x?x?x16xf32> { - // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -531,8 +572,9 @@ func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x } // CHECK-LABEL: "default_dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { - // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = 
#vhlo.bool_v1, @@ -558,15 +600,16 @@ func.func @default_func(%arg0: tensor) -> tensor { // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : () -> () func.return %arg0 : tensor } // CHECK-LABEL: "dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { - // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -587,8 +630,9 @@ func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) } // CHECK-LABEL: "default_infeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.infeed_v1"(%arg0) <{ + // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ // CHECK-SAME: infeed_config = #vhlo.string_v1<"">, // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) @@ -597,8 +641,9 @@ func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.t } // CHECK-LABEL: "default_outfeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> // CHECK-SAME: }> : 
(!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token @@ -606,8 +651,9 @@ func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stab } // CHECK-LABEL: "default_recv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.recv_v1"(%arg0) <{ + // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -619,8 +665,9 @@ func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.tok } // CHECK-LABEL: "default_send" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -632,8 +679,9 @@ func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stableh } // CHECK-LABEL: "default_reduce_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> @@ -655,8 +703,9 @@ func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "default_reduce_window" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> 
tensor<2x16x30x7xf32> { - // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, @@ -678,8 +727,9 @@ func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { - // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, @@ -707,8 +757,9 @@ func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi3 } // CHECK-LABEL: "default_select_and_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { - // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> @@ -736,8 +787,9 @@ func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: ten } // CHECK-LABEL: "default_sort" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.sort_v1"(%arg0) <{ + // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> // CHECK-SAME: is_stable = #vhlo.bool_v1 // CHECK-SAME: }> ({ @@ -756,29 +808,33 @@ 
func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { // ============ OPS ============ // CHECK-LABEL: "op_abs" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_abs(%arg0: tensor) -> tensor { - // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_add" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_after_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 + // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token func.return %0 : !stablehlo.token } // CHECK-LABEL: "op_all_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.all_gather_v1"(%arg0) <{ + // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -794,8 +850,9 @@ func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_all_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_reduce(%arg0: tensor) -> tensor { - // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ + // CHECK: 
"vhlo.all_reduce_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 @@ -817,8 +874,9 @@ func.func @op_all_reduce(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_all_to_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { - // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ + // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, @@ -836,22 +894,25 @@ func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { } // CHECK-LABEL: "op_and" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_atan2" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_batch_norm_grad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: 
tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { - // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ + // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) @@ -863,8 +924,9 @@ func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf } // CHECK-LABEL: "op_batch_norm_inference" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { - // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ + // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> @@ -876,8 +938,9 @@ func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor } // CHECK-LABEL: "op_batch_norm_training" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_batch_norm_training(%arg0: 
tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { - // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) @@ -889,15 +952,17 @@ func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor< } // CHECK-LABEL: "op_bitcast_convert" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_bitcast_convert(%arg0: tensor) -> tensor { - // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ + // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> %0 = "stablehlo.broadcast_in_dim"(%arg0) { @@ -907,8 +972,9 @@ func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_broadcast" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.broadcast_v1"(%arg0) <{ + // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : 
tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> %0 = "stablehlo.broadcast"(%arg0) { @@ -918,9 +984,10 @@ func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_case" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.case_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.case"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> () @@ -929,22 +996,25 @@ func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_cbrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cbrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_ceil" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_ceil(%arg0: tensor) -> tensor { - // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_cholesky" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { - // CHECK: "vhlo.cholesky_v1"(%arg0) <{ + // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ // CHECK-SAME: lower = #vhlo.bool_v1 // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> %0 = "stablehlo.cholesky"(%arg0) { @@ -954,22 +1024,25 @@ func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { } 
// CHECK-LABEL: "op_clamp" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_count_leading_zeros" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { - // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_collective_permute" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { - // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ + // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> @@ -981,8 +1054,9 @@ func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { } // CHECK-LABEL: "op_compare" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: compare_type = #vhlo, // CHECK-SAME: comparison_direction = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -994,15 +1068,17 @@ 
func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_complex" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { - // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> + // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "op_concatenate" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.concatenate"(%arg0, %arg1) { @@ -1012,6 +1088,7 @@ func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor< } // CHECK-LABEL: "op_constant" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_constant(%arg0: tensor) -> tensor { // CHECK: "vhlo.constant_v1"() <{ // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> @@ -1023,15 +1100,17 @@ func.func @op_constant(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_convert" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_convert(%arg0: tensor) -> tensor { - // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_convolution" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { - // CHECK: 
"vhlo.convolution_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -1065,8 +1144,9 @@ func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16 } // CHECK-LABEL: "op_cosine" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cosine(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor func.return %0 : tensor } @@ -1079,8 +1159,9 @@ func.func @op_create_token() -> !stablehlo.token { } // CHECK-LABEL: "op_cross_replica_sum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ + // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cross-replica-sum"(%arg0) { @@ -1090,8 +1171,9 @@ func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_custom_call" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_custom_call(%arg0: tensor) -> tensor { - // CHECK: "vhlo.custom_call_v1"(%arg0) <{ + // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ // CHECK-SAME: api_version = #vhlo, // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, @@ -1122,15 +1204,17 @@ func.func @op_custom_call(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_divide" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, 
!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dot_general" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { - // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, @@ -1150,8 +1234,9 @@ func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) } // CHECK-LABEL: "op_dot" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.dot"(%arg0, %arg1) { @@ -1161,8 +1246,9 @@ func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x } // CHECK-LABEL: "op_dynamic_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: known_nonexpanding_dimensions 
= #vhlo.tensor_v1 : tensor<1xi64>> @@ -1176,8 +1262,9 @@ func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xi } // CHECK-LABEL: "op_dynamic_conv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<2x2xi32>) -> tensor<1x?x?x16xf32> { - // CHECK: "vhlo.dynamic_conv_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_conv_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -1210,8 +1297,9 @@ func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x1 } // CHECK-LABEL: "op_dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { - // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -1231,8 +1319,9 @@ func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32 } // CHECK-LABEL: "op_dynamic_iota" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ + // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_iota"(%arg0) { @@ -1242,22 +1331,25 @@ func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { } // CHECK-LABEL: 
"op_dynamic_pad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dynamic_reshape" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dynamic_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { - // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> 
%0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { @@ -1267,15 +1359,17 @@ func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor } // CHECK-LABEL: "op_dynamic_update_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> + // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> func.return %0 : tensor<16xf32> } // CHECK-LABEL: "op_einsum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.einsum"(%arg0, %arg1) { @@ -1285,22 +1379,25 @@ func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor } // CHECK-LABEL: "op_exponential_minus_one" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { - // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: 
"op_exponential" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_exponential(%arg0: tensor) -> tensor { - // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_fft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - // CHECK: "vhlo.fft_v1"(%arg0) <{ + // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: fft_type = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> @@ -1312,8 +1409,9 @@ func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { } // CHECK-LABEL: "op_floor" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_floor(%arg0: tensor) -> tensor { - // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor func.return %0 : tensor } @@ -1326,16 +1424,17 @@ func.func private @op_func(%arg0: tensor {stablehlo.arg = "0"}) -> (tensor< // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : () -> () func.return %arg0 : tensor } // CHECK-LABEL: "op_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { - // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ + // CHECK: 
"vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -1357,8 +1456,9 @@ func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> te } // CHECK-LABEL: "op_get_dimension_size" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_get_dimension_size(%arg0: tensor) -> tensor { - // CHECK: "vhlo.get_dimension_size_v1"(%arg0) <{ + // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.get_dimension_size"(%arg0) { @@ -1368,8 +1468,9 @@ func.func @op_get_dimension_size(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_get_tuple_element" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { - // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ + // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.get_tuple_element"(%arg0) { @@ -1379,11 +1480,12 @@ func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tenso } // CHECK-LABEL: "op_if" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.if_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }, { - // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.if"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> 
() @@ -1394,15 +1496,17 @@ func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> t } // CHECK-LABEL: "op_imag" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_imag(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_infeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.infeed_v1"(%arg0) <{ + // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) @@ -1425,36 +1529,41 @@ func.func @op_iota() -> tensor<16xf32> { } // CHECK-LABEL: "op_is_finite" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_is_finite(%arg0: tensor) -> tensor { - // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_log" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_log(%arg0: tensor) -> tensor { - // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_log_plus_one" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_log_plus_one(%arg0: tensor) -> tensor { - // CHECK: "vhlo.log_plus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor func.return %0 : tensor 
} // CHECK-LABEL: "op_logistic" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_logistic(%arg0: tensor) -> tensor { - // CHECK: "vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_map" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.map_v1"(%arg0) <{ + // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> ({ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): @@ -1472,57 +1581,65 @@ func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_maximum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_minimum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_multiply" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: 
"vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_negate" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_negate(%arg0: tensor) -> tensor { - // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_not" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_not(%arg0: tensor) -> tensor { - // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_optimization_barrier" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_optimization_barrier(%arg0: tensor) -> tensor { - // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_or" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_outfeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ // 
CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.outfeed"(%arg0, %arg1) { @@ -1532,8 +1649,9 @@ func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo } // CHECK-LABEL: "op_pad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> @@ -1547,36 +1665,41 @@ func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { } // CHECK-LABEL: "op_popcnt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_popcnt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_power" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_real_dynamic_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, 
!vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_real" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_real(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_recv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.recv_v1"(%arg0) <{ + // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<3 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -1589,8 +1712,9 @@ func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { } // CHECK-LABEL: "op_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { - // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) + // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -1605,8 +1729,9 @@ func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_reduce_precision" +// CHECK-NEXT: (%[[ARG0:.*]]: 
{{.*}}) func.func @op_reduce_precision(%arg0: tensor) -> tensor { - // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -1618,8 +1743,9 @@ func.func @op_reduce_precision(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_reduce_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> @@ -1643,8 +1769,9 @@ func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_reduce_window" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { - // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, @@ -1670,8 +1797,9 @@ func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> } // CHECK-LABEL: "op_remainder" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) 
-> tensor func.return %0 : tensor } @@ -1691,16 +1819,18 @@ func.func @op_partition_id() -> tensor { } // CHECK-LABEL: "op_reshape" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { - // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> + // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> func.return %0 : tensor<4x4xf32> } // CHECK-LABEL: "op_return" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.case_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.case"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> () @@ -1709,8 +1839,9 @@ func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_reverse" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reverse_v1"(%arg0) <{ + // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.reverse"(%arg0) { @@ -1720,8 +1851,9 @@ func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_rng_bit_generator" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { - // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ + // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ // CHECK-SAME: rng_algorithm = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, 
!vhlo.tensor_v1) %0:2 = "stablehlo.rng_bit_generator"(%arg0) { @@ -1731,8 +1863,9 @@ func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor } // CHECK-LABEL: "op_rng" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: rng_distribution = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { @@ -1742,29 +1875,33 @@ func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex> } // CHECK-LABEL: "op_round_nearest_afz" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { - // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_round_nearest_even" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_round_nearest_even(%arg0: tensor) -> tensor { - // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_rsqrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_rsqrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, 
%[[ARG2:.*]]: {{.*}}) func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { - // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, @@ -1794,8 +1931,9 @@ func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, % } // CHECK-LABEL: "op_select_and_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { - // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> @@ -1825,15 +1963,17 @@ func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<1 } // CHECK-LABEL: "op_select" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_send" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - 
// CHECK: "vhlo.send_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -1846,8 +1986,9 @@ func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.to } // CHECK-LABEL: "op_set_dimension_size" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { @@ -1857,43 +1998,49 @@ func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> te } // CHECK-LABEL: "op_shift_left" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_shift_right_arithmetic" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_shift_right_logical" +// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_sign" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sign(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_sine" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sine(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { - // CHECK: "vhlo.slice_v1"(%arg0) <{ + // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> @@ -1907,8 +2054,9 @@ func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { } // CHECK-LABEL: "op_sort" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.sort_v1"(%arg0) <{ + // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: is_stable = #vhlo.bool_v1 // CHECK-SAME: }> ({ @@ -1928,29 +2076,33 @@ func.func @op_sort(%arg0: 
tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_sqrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sqrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_subtract" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_tanh" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_tanh(%arg0: tensor) -> tensor { - // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_torch_index_select" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { - // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> @@ -1962,8 +2114,9 @@ func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) } // CHECK-LABEL: "op_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { - // CHECK: 
"vhlo.transpose_v1"(%arg0) <{ + // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> %0 = "stablehlo.transpose"(%arg0) { @@ -1973,8 +2126,9 @@ func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { } // CHECK-LABEL: "op_triangular_solve" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: left_side = #vhlo.bool_v1, // CHECK-SAME: lower = #vhlo.bool_v1, // CHECK-SAME: transpose_a = #vhlo, @@ -1990,15 +2144,17 @@ func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32 } // CHECK-LABEL: "op_tuple" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_tuple(%arg0: tensor) -> tuple> { - // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> + // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> func.return %0 : tuple> } // CHECK-LABEL: "op_unary_einsum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { - // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ + // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> %0 = "stablehlo.unary_einsum"(%arg0) { @@ -2008,22 +2164,25 @@ func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { } // CHECK-LABEL: "op_uniform_dequantize" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> 
!vhlo.tensor_v1 + // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_uniform_quantize" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { - // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> + // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "op_while" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_while(%arg0: tensor) -> tensor { - // CHECK: "vhlo.while_v1"(%arg0) ({ + // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }, { @@ -2041,8 +2200,9 @@ func.func @op_while(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_xor" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } @@ -2050,169 +2210,193 @@ func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { // ============ TYPES ============ // CHECK-LABEL: "type_i1" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return 
%0 : tensor } // CHECK-LABEL: "type_i4" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i8" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> 
!vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui4" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui8" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: 
"type_ui64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3FN" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E5M2" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_bf16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> 
!vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_complex_f32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "type_complex_f64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> 
tensor> func.return %0 : tensor> } // CHECK-LABEL: "type_dynamism_ranked" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { - // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_quantization" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> // CHECK-LABEL: "type_token_callee" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () + // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () return %arg0 : !stablehlo.token } // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> // CHECK-LABEL: "type_token_caller" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} + // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token return %0 : !stablehlo.token } // CHECK-LABEL: "type_tuple" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_tuple(%arg0: tuple>) -> tuple { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo" diff --git 
a/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.mlir b/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.mlir index dab762b6468..2a4ab5bb39d 100644 --- a/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.mlir +++ b/stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.mlir @@ -13,6 +13,7 @@ // ============ ATTRIBUTES ============ // CHECK-LABEL: "attr_comparison_direction_eq" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -22,6 +23,7 @@ func.func @attr_comparison_direction_eq(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_ne" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -31,6 +33,7 @@ func.func @attr_comparison_direction_ne(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_ge" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -40,6 +43,7 @@ func.func @attr_comparison_direction_ge(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_gt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -49,6 +53,7 @@ func.func @attr_comparison_direction_gt(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_le" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // 
CHECK: comparison_direction = #vhlo @@ -58,6 +63,7 @@ func.func @attr_comparison_direction_le(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_direction_lt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { // CHECK: comparison_direction = #vhlo @@ -67,6 +73,7 @@ func.func @attr_comparison_direction_lt(%arg0: tensor, %arg1: tensor) } // CHECK-LABEL: "attr_comparison_type_notype" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo @@ -76,6 +83,7 @@ func.func @attr_comparison_type_notype(%arg0: tensor, %arg1: tensor) - } // CHECK-LABEL: "attr_comparison_type_float" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -86,6 +94,7 @@ func.func @attr_comparison_type_float(%arg0: tensor, %arg1: tensor) -> } // CHECK-LABEL: "attr_comparison_type_totalorder" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -96,6 +105,7 @@ func.func @attr_comparison_type_totalorder(%arg0: tensor, %arg1: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { comparison_direction = #stablehlo, @@ -106,6 +116,7 @@ func.func @attr_comparison_type_signed(%arg0: tensor, %arg1: tensor) - } // CHECK-LABEL: "attr_comparison_type_unsigned" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) -> tensor { %0 = "stablehlo.compare"(%arg0, %arg1) { 
comparison_direction = #stablehlo, @@ -118,6 +129,7 @@ func.func @attr_comparison_type_unsigned(%arg0: tensor, %arg1: tensor) // ConvDimensionNumbers aka #stablehlo.conv is covered below. // CHECK-LABEL: "attr_custom_call_api_version_unspecified" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -128,6 +140,7 @@ func.func @attr_custom_call_api_version_unspecified(%arg0: tensor) -> tenso } // CHECK-LABEL: "attr_custom_call_api_version_original" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -138,6 +151,7 @@ func.func @attr_custom_call_api_version_original(%arg0: tensor) -> tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -148,6 +162,7 @@ func.func @attr_custom_call_api_version_status_returning(%arg0: tensor) -> } // CHECK-LABEL: "attr_custom_call_api_version_status_returning_unified" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_custom_call_api_version_status_returning_unified(%arg0: tensor) -> tensor { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo", @@ -166,6 +181,7 @@ func.func @attr_dict() attributes {stablehlo.attr = {attr1 = 1 : i32, attr2 = 2 // DotDimensionNumbers aka #stablehlo.dot is covered below. 
// CHECK-LABEL: "attr_fft_type_fft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -176,6 +192,7 @@ func.func @attr_fft_type_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomple } // CHECK-LABEL: "attr_fft_type_ifft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -186,6 +203,7 @@ func.func @attr_fft_type_ifft(%arg0: tensor<16xcomplex>) -> tensor<16xcompl } // CHECK-LABEL: "attr_fft_type_rfft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -196,6 +214,7 @@ func.func @attr_fft_type_rfft(%arg0: tensor<16xf32>) -> tensor<9xcomplex> { } // CHECK-LABEL: "attr_fft_type_irfft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> { %0 = "stablehlo.fft"(%arg0) { // CHECK: fft_type = #vhlo @@ -208,6 +227,7 @@ func.func @attr_fft_type_irfft(%arg0: tensor<9xcomplex>) -> tensor<16xf32> // GatherDimensionNumbers aka #stablehlo.gather is covered below. 
// CHECK-LABEL: "attr_precision_config_default" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -216,6 +236,7 @@ func.func @attr_precision_config_default(%arg0: tensor<8x16xf32>, %arg1: tensor< } // CHECK-LABEL: "attr_precision_config_high" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -225,6 +246,7 @@ func.func @attr_precision_config_high(%arg0: tensor<8x16xf32>, %arg1: tensor<16x } // CHECK-LABEL: "attr_precision_config_highest" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { %0 = "stablehlo.dot"(%arg0, %arg1) { // CHECK: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> @@ -234,6 +256,7 @@ func.func @attr_precision_config_highest(%arg0: tensor<8x16xf32>, %arg1: tensor< } // CHECK-LABEL: "attr_rng_algorithm_default" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -243,6 +266,7 @@ func.func @attr_rng_algorithm_default(%arg0: tensor) -> (tensor, tenso } // CHECK-LABEL: "attr_rng_algorithm_three_fry" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -252,6 +276,7 @@ func.func @attr_rng_algorithm_three_fry(%arg0: tensor) -> (tensor, ten } // CHECK-LABEL: "attr_rng_algorithm_philox" +// CHECK-NEXT: (%[[ARG0:.*]]: 
{{.*}}) func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor) { %0:2 = "stablehlo.rng_bit_generator"(%arg0) { // CHECK: rng_algorithm = #vhlo @@ -261,6 +286,7 @@ func.func @attr_rng_algorithm_philox(%arg0: tensor) -> (tensor, tensor } // CHECK-LABEL: "attr_rng_distribution_uniform" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { // CHECK: rng_distribution = #vhlo @@ -270,6 +296,7 @@ func.func @attr_rng_distribution_uniform(%arg0: tensor, %arg1: tensor, } // CHECK-LABEL: "attr_rng_distribution_normal" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { // CHECK: rng_distribution = #vhlo @@ -281,6 +308,7 @@ func.func @attr_rng_distribution_normal(%arg0: tensor, %arg1: tensor, // ScatterDimensionNumbers aka #stablehlo.scatter is covered below. 
// CHECK-LABEL: "attr_transpose_no_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -293,6 +321,7 @@ func.func @attr_transpose_no_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<1 } // CHECK-LABEL: "attr_transpose_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -305,6 +334,7 @@ func.func @attr_transpose_transpose(%arg0: tensor<16x16xf32>, %arg1: tensor<16x1 } // CHECK-LABEL: "attr_transpose_adjoint" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { %0 = "stablehlo.triangular_solve"(%arg0, %arg1) { left_side = true, @@ -319,10 +349,9 @@ func.func @attr_transpose_adjoint(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16x // TypeExtensionsAttr aka #stablehlo.type_extensions is covered below. 
// CHECK-LABEL: "attr_type_extensions_bounds" -func.func @attr_type_extensions_bounds( - %arg0: tensor>) - -> tensor> { - // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1>) -> () +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) +func.func @attr_type_extensions_bounds(%arg0: tensor>) -> tensor> { + // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> () func.return %arg0 : tensor> } @@ -330,8 +359,9 @@ func.func @attr_type_extensions_bounds( // ============ DEFAULTS ============ // CHECK-LABEL: "default_all_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.all_gather_v1"(%arg0) <{ + // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -345,8 +375,9 @@ func.func @default_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "default_all_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_reduce(%arg0: tensor) -> tensor { - // CHECK: "vhlo.all_reduce_v1"(%arg0) + // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) // CHECK-SAME: <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -368,8 +399,9 @@ func.func @default_all_reduce(%arg0: tensor) -> tensor { } // CHECK-LABEL: "default_all_to_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { - // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ + // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, @@ -386,8 +418,9 @@ func.func @default_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { 
} // CHECK-LABEL: "default_cholesky" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { - // CHECK: "vhlo.cholesky_v1"(%arg0) <{ + // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ // CHECK-SAME: lower = #vhlo.bool_v1 // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> %0 = "stablehlo.cholesky"(%arg0) : (tensor<1x16x16xf32>) -> tensor<1x16x16xf32> @@ -395,8 +428,9 @@ func.func @default_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { } // CHECK-LABEL: "default_collective_permute" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { - // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ + // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> @@ -407,8 +441,9 @@ func.func @default_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf3 } // CHECK-LABEL: "default_collective_broadcast" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_collective_broadcast(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { - // CHECK: "vhlo.collective_broadcast_v1"(%arg0) <{ + // CHECK: "vhlo.collective_broadcast_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> @@ -419,8 +454,9 @@ func.func @default_collective_broadcast(%arg0: tensor<16x8xf32>) -> tensor<16x8x } // CHECK-LABEL: "default_compare" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ + 
// CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: compare_type = #vhlo, // CHECK-SAME: comparison_direction = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -431,8 +467,9 @@ func.func @default_compare(%arg0: tensor, %arg1: tensor) -> tensor } // CHECK-LABEL: "default_composite" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_composite(%arg0: tensor) -> tensor { - // CHECK: "vhlo.composite_v1"(%arg0) <{ + // CHECK: "vhlo.composite_v1"(%[[ARG0]]) <{ // CHECK-SAME: composite_attributes = #vhlo.dict_v1<{}> // CHECK-SAME: decomposition = #vhlo.string_v1<"composite_target"> // CHECK-SAME: name = #vhlo.string_v1<"stablehlo.composite_target"> @@ -446,8 +483,9 @@ func.func @default_composite(%arg0: tensor) -> tensor { } // CHECK-LABEL: "default_convolution" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x6x6x16xf32> { - // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -475,8 +513,9 @@ func.func @default_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x2 } // CHECK-LABEL: "default_custom_call" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_custom_call(%arg0: tensor) -> tensor { - // CHECK: "vhlo.custom_call_v1"(%arg0) <{ + // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ // CHECK-SAME: api_version = #vhlo, // CHECK-SAME: backend_config = #vhlo.string_v1<"">, // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, @@ -493,8 +532,9 @@ func.func @default_custom_call(%arg0: tensor) -> tensor { } // CHECK-LABEL: "default_dot_general" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dot_general(%arg0: 
tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { - // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, @@ -513,8 +553,9 @@ func.func @default_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf } // CHECK-LABEL: "default_dot" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.dot"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<16x8xf32>) -> tensor<8x8xf32> @@ -522,8 +563,9 @@ func.func @default_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tens } // CHECK-LABEL: "default_dynamic_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>>, // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<0xi64>> @@ -535,8 +577,9 @@ func.func @default_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tenso } // CHECK-LABEL: "default_dynamic_conv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_dynamic_conv(%arg0: 
tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<2x2xi64>) -> tensor<1x?x?x16xf32> { - // CHECK: "vhlo.dynamic_conv_v2"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_conv_v2"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -563,8 +606,9 @@ func.func @default_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x } // CHECK-LABEL: "default_dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { - // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -590,15 +634,16 @@ func.func @default_func(%arg0: tensor) -> tensor { // CHECK-SAME: sym_name = #vhlo.string_v1<"default_func">, // CHECK-SAME: sym_visibility = #vhlo.string_v1<""> // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : () -> () func.return %arg0 : tensor } // CHECK-LABEL: "dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { - // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // 
CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -619,8 +664,9 @@ func.func @dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) } // CHECK-LABEL: "default_infeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.infeed_v1"(%arg0) <{ + // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ // CHECK-SAME: infeed_config = #vhlo.string_v1<"">, // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[]> // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) @@ -629,8 +675,9 @@ func.func @default_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.t } // CHECK-LABEL: "default_outfeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: outfeed_config = #vhlo.string_v1<""> // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.outfeed"(%arg0, %arg1) : (tensor, !stablehlo.token) -> !stablehlo.token @@ -638,8 +685,9 @@ func.func @default_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stab } // CHECK-LABEL: "default_recv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.recv_v1"(%arg0) <{ + // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -651,8 +699,9 @@ func.func @default_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.tok } // CHECK-LABEL: "default_send" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: 
"vhlo.send_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -664,8 +713,9 @@ func.func @default_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stableh } // CHECK-LABEL: "default_reduce_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> @@ -687,8 +737,9 @@ func.func @default_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "default_reduce_window" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x16x30x7xf32> { - // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, @@ -710,8 +761,9 @@ func.func @default_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { - // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, @@ -739,8 +791,9 @@ func.func @default_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi3 } 
// CHECK-LABEL: "default_select_and_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<10x23x23x64xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { - // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> @@ -768,8 +821,9 @@ func.func @default_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: ten } // CHECK-LABEL: "default_sort" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.sort_v1"(%arg0) <{ + // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<-1 : i64> // CHECK-SAME: is_stable = #vhlo.bool_v1 // CHECK-SAME: }> ({ @@ -788,29 +842,33 @@ func.func @default_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { // ============ OPS ============ // CHECK-LABEL: "op_abs" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_abs(%arg0: tensor) -> tensor { - // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_add" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_add(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_after_all" +// CHECK-NEXT: (%[[ARG0:.*]]: 
{{.*}}) func.func @op_after_all(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.after_all_v1"(%arg0) : (!vhlo.token_v1) -> !vhlo.token_v1 + // CHECK: "vhlo.after_all_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.after_all"(%arg0) : (!stablehlo.token) -> !stablehlo.token func.return %0 : !stablehlo.token } // CHECK-LABEL: "op_all_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.all_gather_v1"(%arg0) <{ + // CHECK: "vhlo.all_gather_v1"(%[[ARG0]]) <{ // CHECK-SAME: all_gather_dim = #vhlo.integer_v1<1 : i64> // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, @@ -826,8 +884,9 @@ func.func @op_all_gather(%arg0: tensor<16x8xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_all_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_reduce(%arg0: tensor) -> tensor { - // CHECK: "vhlo.all_reduce_v1"(%arg0) <{ + // CHECK: "vhlo.all_reduce_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: use_global_device_ids = #vhlo.bool_v1 @@ -850,7 +909,7 @@ func.func @op_all_reduce(%arg0: tensor) -> tensor { // CHECK-LABEL: "op_all_reduce_with_promotable_types" func.func @op_all_reduce_with_promotable_types(%operand: tensor) -> tensor { - // CHECK: "vhlo.all_reduce_v1"(%arg0) + // CHECK: "vhlo.all_reduce_v1"(%[[ARG0:.*]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -868,8 +927,9 @@ func.func @op_all_reduce_with_promotable_types(%operand: tensor) -> tensor< } // CHECK-LABEL: "op_all_to_all" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> 
tensor<16x4xf32> { - // CHECK: "vhlo.all_to_all_v1"(%arg0) <{ + // CHECK: "vhlo.all_to_all_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: concat_dimension = #vhlo.integer_v1<0 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<1x4xi64>>, @@ -887,22 +947,25 @@ func.func @op_all_to_all(%arg0: tensor<4x16xf32>) -> tensor<16x4xf32> { } // CHECK-LABEL: "op_and" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_and(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_atan2" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_atan2(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.atan2_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.atan2_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.atan2"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_batch_norm_grad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16x16x16x16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { - // CHECK: "vhlo.batch_norm_grad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ + // CHECK: "vhlo.batch_norm_grad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : 
(!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) @@ -914,8 +977,9 @@ func.func @op_batch_norm_grad(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf } // CHECK-LABEL: "op_batch_norm_inference" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>, %arg3: tensor<16xf32>, %arg4: tensor<16xf32>) -> tensor<16x16x16x16xf32> { - // CHECK: "vhlo.batch_norm_inference_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) <{ + // CHECK: "vhlo.batch_norm_inference_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1> @@ -927,8 +991,9 @@ func.func @op_batch_norm_inference(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor } // CHECK-LABEL: "op_batch_norm_training" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor<16xf32>, %arg2: tensor<16xf32>) -> (tensor<16x16x16x16xf32>, tensor<16xf32>, tensor<16xf32>) { - // CHECK: "vhlo.batch_norm_training_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.batch_norm_training_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: epsilon = #vhlo.float_v1<1.000000e-03 : !vhlo.f32_v1>, // CHECK-SAME: feature_index = #vhlo.integer_v1<0 : i64> // 
CHECK-SAME: }> : (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) -> (!vhlo.tensor_v1<16x16x16x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x!vhlo.f32_v1>) @@ -940,15 +1005,17 @@ func.func @op_batch_norm_training(%arg0: tensor<16x16x16x16xf32>, %arg1: tensor< } // CHECK-LABEL: "op_bitcast_convert" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_bitcast_convert(%arg0: tensor) -> tensor { - // CHECK: "vhlo.bitcast_convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.bitcast_convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.bitcast_convert"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.broadcast_in_dim_v1"(%arg0) <{ + // CHECK: "vhlo.broadcast_in_dim_v1"(%[[ARG0]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> %0 = "stablehlo.broadcast_in_dim"(%arg0) { @@ -958,8 +1025,9 @@ func.func @op_broadcast_in_dim(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_broadcast" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.broadcast_v1"(%arg0) <{ + // CHECK: "vhlo.broadcast_v1"(%[[ARG0]]) <{ // CHECK-SAME: broadcast_sizes = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x16x!vhlo.f32_v1> %0 = "stablehlo.broadcast"(%arg0) { @@ -969,9 +1037,10 @@ func.func @op_broadcast(%arg0: tensor<16xf32>) -> tensor<16x16xf32> { } // CHECK-LABEL: "op_case" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: 
"vhlo.case_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.case"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> () @@ -980,22 +1049,25 @@ func.func @op_case(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_cbrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cbrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cbrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.cbrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cbrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_ceil" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_ceil(%arg0: tensor) -> tensor { - // CHECK: "vhlo.ceil_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.ceil_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.ceil"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_cholesky" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { - // CHECK: "vhlo.cholesky_v1"(%arg0) <{ + // CHECK: "vhlo.cholesky_v1"(%[[ARG0]]) <{ // CHECK-SAME: lower = #vhlo.bool_v1 // CHECK-SAME: }> : (!vhlo.tensor_v1<1x16x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<1x16x16x!vhlo.f32_v1> %0 = "stablehlo.cholesky"(%arg0) { @@ -1005,22 +1077,25 @@ func.func @op_cholesky(%arg0: tensor<1x16x16xf32>) -> tensor<1x16x16xf32> { } // CHECK-LABEL: "op_clamp" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_clamp(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.clamp_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.clamp_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, 
!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.clamp"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_count_leading_zeros" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_count_leading_zeros(%arg0: tensor) -> tensor { - // CHECK: "vhlo.count_leading_zeros_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.count_leading_zeros_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.count_leading_zeros"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_collective_permute" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { - // CHECK: "vhlo.collective_permute_v1"(%arg0) <{ + // CHECK: "vhlo.collective_permute_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: source_target_pairs = #vhlo.tensor_v1 : tensor<3x2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x8x!vhlo.f32_v1> @@ -1032,8 +1107,9 @@ func.func @op_collective_permute(%arg0: tensor<16x8xf32>) -> tensor<16x8xf32> { } // CHECK-LABEL: "op_compare" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.compare_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.compare_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: compare_type = #vhlo, // CHECK-SAME: comparison_direction = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -1045,15 +1121,17 @@ func.func @op_compare(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_complex" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_complex(%arg0: tensor, %arg1: tensor) -> tensor> { - // CHECK: "vhlo.complex_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> + // CHECK: "vhlo.complex_v1"(%[[ARG0]], %[[ARG1]]) : 
(!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1> %0 = "stablehlo.complex"(%arg0, %arg1) : (tensor, tensor) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "op_composite" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_composite(%arg0: tensor) -> tensor { - // CHECK: "vhlo.composite_v1"(%arg0) <{ + // CHECK: "vhlo.composite_v1"(%[[ARG0]]) <{ // CHECK-SAME: composite_attributes = #vhlo.dict_v1<{#vhlo.string_v1<"my_int"> = #vhlo.integer_v1<1 : i64>, #vhlo.string_v1<"my_string"> = #vhlo.string_v1<"foo">}> // CHECK-SAME: decomposition = #vhlo.string_v1<"composite_target"> // CHECK-SAME: name = #vhlo.string_v1<"stablehlo.composite_target"> @@ -1072,8 +1150,9 @@ func.func @op_composite(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_concatenate" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.concatenate_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.concatenate_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x!vhlo.f32_v1>, !vhlo.tensor_v1<8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.concatenate"(%arg0, %arg1) { @@ -1083,6 +1162,7 @@ func.func @op_concatenate(%arg0: tensor<8xf32>, %arg1: tensor<8xf32>) -> tensor< } // CHECK-LABEL: "op_constant" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_constant(%arg0: tensor) -> tensor { // CHECK: "vhlo.constant_v1"() <{ // CHECK-SAME: value = #vhlo.tensor_v1 : tensor> @@ -1094,15 +1174,17 @@ func.func @op_constant(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_convert" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_convert(%arg0: tensor) -> tensor { - // CHECK: "vhlo.convert_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.convert_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.convert"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: 
"op_convolution" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>) -> tensor<1x7x7x16xf32> { - // CHECK: "vhlo.convolution_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.convolution_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -1136,8 +1218,9 @@ func.func @op_convolution(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16 } // CHECK-LABEL: "op_cosine" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cosine(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cosine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.cosine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cosine"(%arg0) : (tensor) -> tensor func.return %0 : tensor } @@ -1150,8 +1233,9 @@ func.func @op_create_token() -> !stablehlo.token { } // CHECK-LABEL: "op_cross_replica_sum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { - // CHECK: "vhlo.cross-replica-sum_v1"(%arg0) <{ + // CHECK: "vhlo.cross-replica-sum_v1"(%[[ARG0]]) <{ // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.cross-replica-sum"(%arg0) { @@ -1161,8 +1245,9 @@ func.func @op_cross_replica_sum(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_custom_call" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_custom_call(%arg0: tensor) -> tensor { - // CHECK: "vhlo.custom_call_v1"(%arg0) <{ + // CHECK: "vhlo.custom_call_v1"(%[[ARG0]]) <{ // CHECK-SAME: api_version = #vhlo, // CHECK-SAME: backend_config = #vhlo.string_v1<"\08\03\1A\02">, // CHECK-SAME: call_target_name = #vhlo.string_v1<"foo">, @@ -1193,15 +1278,17 @@ func.func @op_custom_call(%arg0: tensor) -> tensor { } // CHECK-LABEL: 
"op_divide" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_divide(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.divide_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.divide_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.divide"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dot_general" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) -> tensor<8x8x8xf32> { - // CHECK: "vhlo.dot_general_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_general_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: lhs_batching_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: lhs_contracting_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]>, @@ -1221,8 +1308,9 @@ func.func @op_dot_general(%arg0: tensor<8x8x16xf32>, %arg1: tensor<8x16x8xf32>) } // CHECK-LABEL: "op_dot" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.dot_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dot_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: precision_config = #vhlo.array_v1<[#vhlo, #vhlo]> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.dot"(%arg0, %arg1) { @@ -1232,8 +1320,9 @@ func.func @op_dot(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x } // CHECK-LABEL: "op_dynamic_broadcast_in_dim" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.dynamic_broadcast_in_dim_v1"(%[[ARG0]], 
%[[ARG1]]) <{ // CHECK-SAME: broadcast_dimensions = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: known_expanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: known_nonexpanding_dimensions = #vhlo.tensor_v1 : tensor<1xi64>> @@ -1247,8 +1336,9 @@ func.func @op_dynamic_broadcast_in_dim(%arg0: tensor, %arg1: tensor<2xi } // CHECK-LABEL: "op_dynamic_conv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x16xf32>, %arg2: tensor<2x2xi64>) -> tensor<1x?x?x16xf32> { - // CHECK: "vhlo.dynamic_conv_v2"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_conv_v2"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: batch_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: feature_group_count = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: input_batch_dimension = #vhlo.integer_v1<0 : i64>, @@ -1280,8 +1370,9 @@ func.func @op_dynamic_conv(%arg0: tensor<1x8x8x207xf32>, %arg1: tensor<3x3x207x1 } // CHECK-LABEL: "op_dynamic_gather" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>, %arg2 : tensor<3xi32>) -> tensor<1x5x8xf32> { - // CHECK: "vhlo.dynamic_gather_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.dynamic_gather_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -1301,8 +1392,9 @@ func.func @op_dynamic_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32 } // CHECK-LABEL: "op_dynamic_iota" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.dynamic_iota_v1"(%arg0) <{ + // CHECK: "vhlo.dynamic_iota_v1"(%[[ARG0]]) <{ // CHECK-SAME: iota_dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> 
: (!vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_iota"(%arg0) { @@ -1312,22 +1404,25 @@ func.func @op_dynamic_iota(%arg0: tensor<1xindex>) -> tensor { } // CHECK-LABEL: "op_dynamic_pad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}, %[[ARG4:.*]]: {{.*}}) func.func @op_dynamic_pad(%arg0: tensor, %arg1: tensor, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>, %arg4: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.dynamic_pad_v1"(%arg0, %arg1, %arg2, %arg3, %arg4) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.dynamic_pad_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]], %[[ARG4]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_pad"(%arg0, %arg1, %arg2, %arg3, %arg4) : (tensor, tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dynamic_reshape" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_reshape(%arg0: tensor<16xf32>, %arg1: tensor<2xindex>) -> tensor { - // CHECK: "vhlo.dynamic_reshape_v1"(%arg0, %arg1) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.dynamic_reshape_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<16xf32>, tensor<2xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_dynamic_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor<4xf32> { - // CHECK: "vhlo.dynamic_slice_v1"(%arg0, %arg1) <{ + // CHECK: 
"vhlo.dynamic_slice_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: slice_sizes = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f32_v1> %0 = "stablehlo.dynamic_slice"(%arg0, %arg1) { @@ -1337,15 +1432,17 @@ func.func @op_dynamic_slice(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor } // CHECK-LABEL: "op_dynamic_update_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_dynamic_update_slice(%arg0: tensor<16xf32>, %arg1: tensor<4xf32>, %arg2: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.dynamic_update_slice_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> + // CHECK: "vhlo.dynamic_update_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1<4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.dynamic_update_slice"(%arg0, %arg1, %arg2) : (tensor<16xf32>, tensor<4xf32>, tensor) -> tensor<16xf32> func.return %0 : tensor<16xf32> } // CHECK-LABEL: "op_einsum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor<8x8xf32> { - // CHECK: "vhlo.einsum_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.einsum_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab,bc->ac"> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>, !vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x8x!vhlo.f32_v1> %0 = "stablehlo.einsum"(%arg0, %arg1) { @@ -1355,22 +1452,25 @@ func.func @op_einsum(%arg0: tensor<8x16xf32>, %arg1: tensor<16x8xf32>) -> tensor } // CHECK-LABEL: "op_exponential_minus_one" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_exponential_minus_one(%arg0: tensor) -> tensor { - // CHECK: "vhlo.exponential_minus_one_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 
+ // CHECK: "vhlo.exponential_minus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.exponential_minus_one"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_exponential" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_exponential(%arg0: tensor) -> tensor { - // CHECK: "vhlo.exponential_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.exponential_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.exponential"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_fft" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { - // CHECK: "vhlo.fft_v1"(%arg0) <{ + // CHECK: "vhlo.fft_v1"(%[[ARG0]]) <{ // CHECK-SAME: fft_length = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: fft_type = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.complex_v1>) -> !vhlo.tensor_v1<16x!vhlo.complex_v1> @@ -1382,8 +1482,9 @@ func.func @op_fft(%arg0: tensor<16xcomplex>) -> tensor<16xcomplex> { } // CHECK-LABEL: "op_floor" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_floor(%arg0: tensor) -> tensor { - // CHECK: "vhlo.floor_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.floor_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.floor"(%arg0) : (tensor) -> tensor func.return %0 : tensor } @@ -1396,16 +1497,17 @@ func.func private @op_func(%arg0: tensor {stablehlo.arg = "0"}) -> (tensor< // CHECK-SAME: sym_name = #vhlo.string_v1<"op_func">, // CHECK-SAME: sym_visibility = #vhlo.string_v1<"private"> // CHECK-SAME: }> ({ - // CHECK-NEXT: ^[[BB:bb.*]](%arg0: !vhlo.tensor_v1): - // CHECK-NEXT: "vhlo.return_v1"(%arg0) : (!vhlo.tensor_v1) -> () + // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG0:.*]]: !vhlo.tensor_v1): + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : () -> () func.return %arg0 : tensor } // CHECK-LABEL: "op_gather" +// CHECK-NEXT: 
(%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> tensor<1x5x1xf32> { - // CHECK: "vhlo.gather_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.gather_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: collapsed_slice_dims = #vhlo.tensor_v1 : tensor<2xi64>>, // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, @@ -1427,8 +1529,9 @@ func.func @op_gather(%arg0 : tensor<2x4x9xf32>, %arg1 : tensor<1x5x2xi32>) -> te } // CHECK-LABEL: "op_get_dimension_size" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_get_dimension_size(%arg0: tensor) -> tensor { - // CHECK: "vhlo.get_dimension_size_v1"(%arg0) <{ + // CHECK: "vhlo.get_dimension_size_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.get_dimension_size"(%arg0) { @@ -1438,8 +1541,9 @@ func.func @op_get_dimension_size(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_get_tuple_element" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tensor { - // CHECK: "vhlo.get_tuple_element_v1"(%arg0) <{ + // CHECK: "vhlo.get_tuple_element_v1"(%[[ARG0]]) <{ // CHECK-SAME: index = #vhlo.integer_v1<0 : i32> // CHECK-SAME: }> : (!vhlo.tuple_v1, !vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.get_tuple_element"(%arg0) { @@ -1449,11 +1553,12 @@ func.func @op_get_tuple_element(%arg0: tuple, tensor>) -> tenso } // CHECK-LABEL: "op_if" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.if_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.if_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }, { - // CHECK-NEXT: "vhlo.return_v1"(%arg2) : (!vhlo.tensor_v1) -> 
() + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG2]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.if"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> () @@ -1464,15 +1569,17 @@ func.func @op_if(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> t } // CHECK-LABEL: "op_imag" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_imag(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.imag_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.imag_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.imag"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_infeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_infeed(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.infeed_v1"(%arg0) <{ + // CHECK: "vhlo.infeed_v1"(%[[ARG0]]) <{ // CHECK-SAME: infeed_config = #vhlo.string_v1<"foo">, // CHECK-SAME{LITERAL}: layout = #vhlo.array_v1<[#vhlo.array_v1<[]>]> // CHECK-SAME: }> : (!vhlo.token_v1) -> (!vhlo.tensor_v1, !vhlo.token_v1) @@ -1495,36 +1602,41 @@ func.func @op_iota() -> tensor<16xf32> { } // CHECK-LABEL: "op_is_finite" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_is_finite(%arg0: tensor) -> tensor { - // CHECK: "vhlo.is_finite_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.is_finite_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.is_finite"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_log" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_log(%arg0: tensor) -> tensor { - // CHECK: "vhlo.log_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.log_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.log"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_log_plus_one" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_log_plus_one(%arg0: tensor) -> tensor { - // CHECK: "vhlo.log_plus_one_v1"(%arg0) : 
(!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.log_plus_one_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.log_plus_one"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_logistic" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_logistic(%arg0: tensor) -> tensor { - // CHECK: "vhlo.logistic_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.logistic_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.logistic"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_map" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.map_v1"(%arg0) <{ + // CHECK: "vhlo.map_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> ({ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): @@ -1542,57 +1654,65 @@ func.func @op_map(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_maximum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_maximum(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.maximum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.maximum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.maximum"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_minimum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_minimum(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.minimum_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.minimum_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.minimum"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_multiply" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) 
func.func @op_multiply(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.multiply_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.multiply_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.multiply"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_negate" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_negate(%arg0: tensor) -> tensor { - // CHECK: "vhlo.negate_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.negate_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.negate"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_not" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_not(%arg0: tensor) -> tensor { - // CHECK: "vhlo.not_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.not_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.not"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_optimization_barrier" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_optimization_barrier(%arg0: tensor) -> tensor { - // CHECK: "vhlo.optimization_barrier_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.optimization_barrier_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.optimization_barrier"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_or" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_or(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.or_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.or_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.or"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_outfeed" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func 
@op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.outfeed_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.outfeed_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: outfeed_config = #vhlo.string_v1<"foo"> // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.token_v1) -> !vhlo.token_v1 %0 = "stablehlo.outfeed"(%arg0, %arg1) { @@ -1602,8 +1722,9 @@ func.func @op_outfeed(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo } // CHECK-LABEL: "op_pad" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.pad_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.pad_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: edge_padding_high = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: edge_padding_low = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: interior_padding = #vhlo.tensor_v1 : tensor<1xi64>> @@ -1617,36 +1738,41 @@ func.func @op_pad(%arg0: tensor<8xf32>, %arg1: tensor) -> tensor<16xf32> { } // CHECK-LABEL: "op_popcnt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_popcnt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.popcnt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.popcnt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.popcnt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_power" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_power(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.power_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.power_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.power"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_real_dynamic_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}, %[[ARG3:.*]]: {{.*}}) func.func @op_real_dynamic_slice(%arg0: tensor, %arg1: 
tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor { - // CHECK: "vhlo.real_dynamic_slice_v1"(%arg0, %arg1, %arg2, %arg3) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.real_dynamic_slice_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>, !vhlo.tensor_v1<1x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_real" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_real(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.real_v1"(%arg0) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 + // CHECK: "vhlo.real_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.real"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_recv" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { - // CHECK: "vhlo.recv_v1"(%arg0) <{ + // CHECK: "vhlo.recv_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<3 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -1659,8 +1785,9 @@ func.func @op_recv(%arg0: !stablehlo.token) -> (tensor, !stablehlo.token) { } // CHECK-LABEL: "op_reduce" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { - // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) + // CHECK: "vhlo.reduce_v1"(%[[ARG0]], %[[ARG1]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1 
@@ -1675,8 +1802,9 @@ func.func @op_reduce(%arg0: tensor<16xf32>, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_reduce_precision" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reduce_precision(%arg0: tensor) -> tensor { - // CHECK: "vhlo.reduce_precision_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_precision_v1"(%[[ARG0]]) <{ // CHECK-SAME: exponent_bits = #vhlo.integer_v1<8 : i32> // CHECK-SAME: mantissa_bits = #vhlo.integer_v1<10 : i32> // CHECK-SAME: }> : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 @@ -1690,7 +1818,7 @@ func.func @op_reduce_precision(%arg0: tensor) -> tensor { // CHECK_lABEL: "op_reduce_with_promotable_types" func.func @op_reduce_with_promotable_types(%arg0: tensor<4x4xf32>, %arg1 : tensor) -> (tensor<4xf64>) { - // CHECK: "vhlo.reduce_v1"(%arg0, %arg1) + // CHECK: "vhlo.reduce_v1"(%[[ARG0:.*]], %[[ARG1:.*]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1<4x4x!vhlo.f32_v1>, !vhlo.tensor_v1) -> !vhlo.tensor_v1<4x!vhlo.f64_v1> @@ -1705,8 +1833,9 @@ func.func @op_reduce_with_promotable_types(%arg0: tensor<4x4xf32>, %arg1 : tenso } // CHECK-LABEL: "op_reduce_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reduce_scatter_v1"(%arg0) <{ + // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<1 : i64>, // CHECK-SAME{LITERAL}: replica_groups = #vhlo.tensor_v1 : tensor<2x1xi64>>, // CHECK-SAME: scatter_dimension = #vhlo.integer_v1<0 : i64> @@ -1731,7 +1860,7 @@ func.func @op_reduce_scatter(%arg0: tensor<16xf32>) -> tensor<16xf32> { // CHECK_lABEL: "op_reduce_scatter_with_promotable_types" func.func @op_reduce_scatter_with_promotable_types(%data: tensor<4x16xf32>) -> tensor<4x4xf64> { - // CHECK: "vhlo.reduce_scatter_v1"(%arg0) + // CHECK: "vhlo.reduce_scatter_v1"(%[[ARG0:.*]]) // CHECK: 
^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1<4x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f64_v1> @@ -1748,8 +1877,9 @@ func.func @op_reduce_scatter_with_promotable_types(%data: tensor<4x16xf32>) -> t // CHECK-LABEL: "op_reduce_window" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> tensor<2x9x16x7xf32> { - // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.reduce_window_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: base_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME{LITERAL}: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dilations = #vhlo.tensor_v1 : tensor<4xi64>>, @@ -1774,11 +1904,11 @@ func.func @op_reduce_window(%arg0: tensor<2x17x31x7xf32>, %arg1: tensor) -> func.return %0 : tensor<2x9x16x7xf32> } -// CHECK_lABEL: "op_reduce_window_with_promotable_types" +// CHECK-LABEL: "op_reduce_window_with_promotable_types" func.func @op_reduce_window_with_promotable_types(%arg0: tensor<4x2xf32>, %arg1: tensor<4x2xf32>, %init0: tensor, %init1: tensor) -> (tensor<2x2xf64>, tensor<2x2xf32>) { - // CHECK: "vhlo.reduce_window_v1"(%arg0, %arg1, %arg2, %arg3) + // CHECK: "vhlo.reduce_window_v1"(%[[ARG0:.*]], %[[ARG1:.*]], %[[ARG2:.*]], %[[ARG3:.*]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1, %[[ARG3:arg.*]]: !vhlo.tensor_v1, %[[ARG4:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]], %[[VAL2:.*]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1<4x2x!vhlo.f32_v1>, !vhlo.tensor_v1<4x2x!vhlo.f32_v1>, !vhlo.tensor_v1, !vhlo.tensor_v1) -> (!vhlo.tensor_v1<2x2x!vhlo.f64_v1>, !vhlo.tensor_v1<2x2x!vhlo.f32_v1>) @@ -1798,8 +1928,9 @@ func.func @op_reduce_window_with_promotable_types(%arg0: tensor<4x2xf32>, } // 
CHECK-LABEL: "op_remainder" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_remainder(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.remainder_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.remainder_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.remainder"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } @@ -1819,16 +1950,18 @@ func.func @op_partition_id() -> tensor { } // CHECK-LABEL: "op_reshape" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reshape(%arg0: tensor<16xf32>) -> tensor<4x4xf32> { - // CHECK: "vhlo.reshape_v1"(%arg0) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> + // CHECK: "vhlo.reshape_v1"(%[[ARG0]]) : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<4x4x!vhlo.f32_v1> %0 = "stablehlo.reshape"(%arg0) : (tensor<16xf32>) -> tensor<4x4xf32> func.return %0 : tensor<4x4xf32> } // CHECK-LABEL: "op_return" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.case_v1"(%arg0) ({ - // CHECK-NEXT: "vhlo.return_v1"(%arg1) : (!vhlo.tensor_v1) -> () + // CHECK: "vhlo.case_v1"(%[[ARG0]]) ({ + // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.case"(%arg0) ({ "stablehlo.return"(%arg1) : (tensor) -> () @@ -1837,8 +1970,9 @@ func.func @op_return(%arg0: tensor, %arg1: tensor) -> tensor { } // CHECK-LABEL: "op_reverse" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.reverse_v1"(%arg0) <{ + // CHECK: "vhlo.reverse_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimensions = #vhlo.tensor_v1 : tensor<1xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.reverse"(%arg0) { @@ -1848,8 
+1982,9 @@ func.func @op_reverse(%arg0: tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_rng_bit_generator" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor) { - // CHECK: "vhlo.rng_bit_generator_v1"(%arg0) <{ + // CHECK: "vhlo.rng_bit_generator_v1"(%[[ARG0]]) <{ // CHECK-SAME: rng_algorithm = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1) -> (!vhlo.tensor_v1, !vhlo.tensor_v1) %0:2 = "stablehlo.rng_bit_generator"(%arg0) { @@ -1859,8 +1994,9 @@ func.func @op_rng_bit_generator(%arg0: tensor) -> (tensor, tensor } // CHECK-LABEL: "op_rng" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex>) -> tensor { - // CHECK: "vhlo.rng_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.rng_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: rng_distribution = #vhlo // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1<0x!vhlo.index_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.rng"(%arg0, %arg1, %arg2) { @@ -1870,29 +2006,33 @@ func.func @op_rng(%arg0: tensor, %arg1: tensor, %arg2: tensor<0xindex> } // CHECK-LABEL: "op_round_nearest_afz" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_round_nearest_afz(%arg0: tensor) -> tensor { - // CHECK: "vhlo.round_nearest_afz_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.round_nearest_afz_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.round_nearest_afz"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_round_nearest_even" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_round_nearest_even(%arg0: tensor) -> tensor { - // CHECK: "vhlo.round_nearest_even_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.round_nearest_even_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.round_nearest_even"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: 
"op_rsqrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_rsqrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.rsqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.rsqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.rsqrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, %arg2: tensor<10x300xf32>) -> tensor<200x100x300xf32> { - // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: index_vector_dim = #vhlo.integer_v1<1 : i64>, // CHECK-SAME: indices_are_sorted = #vhlo.bool_v1, // CHECK-SAME: inserted_window_dims = #vhlo.tensor_v1 : tensor<2xi64>>, @@ -1925,7 +2065,7 @@ func.func @op_scatter(%arg0: tensor<200x100x300xf32>, %arg1: tensor<10x2xi32>, % func.func @op_scatter_with_promotable_types(%input_tensor: tensor<200x100x300xf32>, %scatter_indices: tensor<10x2xi32>, %updates: tensor<10x300xf32>) -> tensor<200x100x300xf64> { - // CHECK: "vhlo.scatter_v1"(%arg0, %arg1, %arg2) + // CHECK: "vhlo.scatter_v1"(%[[ARG0:.*]], %[[ARG1:.*]], %[[ARG2:.*]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: "vhlo.return_v1"(%[[VAL1:.*]]) : (!vhlo.tensor_v1) -> () // CHECK: }) : (!vhlo.tensor_v1<200x100x300x!vhlo.f32_v1>, !vhlo.tensor_v1<10x2x!vhlo.i32_v1>, !vhlo.tensor_v1<10x300x!vhlo.f32_v1>) -> !vhlo.tensor_v1<200x100x300x!vhlo.f64_v1> @@ -1948,8 +2088,9 @@ func.func @op_scatter_with_promotable_types(%input_tensor: tensor<200x100x300xf3 } // CHECK-LABEL: "op_select_and_scatter" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf32> { - // CHECK: 
"vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) <{ + // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) <{ // CHECK-SAME: padding = #vhlo.tensor_v1 : tensor<4x2xi64>>, // CHECK-SAME: window_dimensions = #vhlo.tensor_v1 : tensor<4xi64>>, // CHECK-SAME: window_strides = #vhlo.tensor_v1 : tensor<4xi64>> @@ -1979,8 +2120,9 @@ func.func @op_select_and_scatter(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<1 } // CHECK-LABEL: "op_select_and_scatter_with_promotable_types" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_select_and_scatter_with_promotable_types(%arg0: tensor<10x24x24x64xf32>, %arg1: tensor<12x13x13x66xf32>, %arg2: tensor) -> tensor<10x24x24x64xf64> { - // CHECK: "vhlo.select_and_scatter_v1"(%arg0, %arg1, %arg2) + // CHECK: "vhlo.select_and_scatter_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) // CHECK: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1, %[[ARG2:arg.*]]: !vhlo.tensor_v1): // CHECK: %[[VAL:.*]] = "vhlo.add_v1"(%[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 // CHECK: "vhlo.return_v1"(%[[VAL]]) : (!vhlo.tensor_v1) -> () @@ -2002,15 +2144,17 @@ func.func @op_select_and_scatter_with_promotable_types(%arg0: tensor<10x24x24x64 } // CHECK-LABEL: "op_select" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}, %[[ARG2:.*]]: {{.*}}) func.func @op_select(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - // CHECK: "vhlo.select_v1"(%arg0, %arg1, %arg2) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.select_v1"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.select"(%arg0, %arg1, %arg2) : (tensor, tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_send" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.token { - // CHECK: 
"vhlo.send_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.send_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: channel_id = #vhlo.integer_v1<0 : i64>, // CHECK-SAME: channel_type = #vhlo.integer_v1<2 : i64>, // CHECK-SAME: is_host_transfer = #vhlo.bool_v1 @@ -2023,8 +2167,9 @@ func.func @op_send(%arg0: tensor, %arg1: !stablehlo.token) -> !stablehlo.to } // CHECK-LABEL: "op_set_dimension_size" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> tensor<16xf32> { - // CHECK: "vhlo.set_dimension_size_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.set_dimension_size_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1<16x!vhlo.f32_v1> %0 = "stablehlo.set_dimension_size"(%arg0, %arg1) { @@ -2034,43 +2179,49 @@ func.func @op_set_dimension_size(%arg0: tensor, %arg1: tensor) -> te } // CHECK-LABEL: "op_shift_left" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_left(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_left_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_left_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_left"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_shift_right_arithmetic" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_right_arithmetic(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_right_arithmetic_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_right_arithmetic_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_right_arithmetic"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_shift_right_logical" +// 
CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_shift_right_logical(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.shift_right_logical_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.shift_right_logical_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.shift_right_logical"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_sign" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sign(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sign_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sign_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sign"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_sine" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sine(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sine_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sine_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sine"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_slice" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { - // CHECK: "vhlo.slice_v1"(%arg0) <{ + // CHECK: "vhlo.slice_v1"(%[[ARG0]]) <{ // CHECK-SAME: limit_indices = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: start_indices = #vhlo.tensor_v1 : tensor<1xi64>>, // CHECK-SAME: strides = #vhlo.tensor_v1 : tensor<1xi64>> @@ -2084,8 +2235,9 @@ func.func @op_slice(%arg0: tensor<16xf32>) -> tensor<4xf32> { } // CHECK-LABEL: "op_sort" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sort(%arg0: tensor<16xf32>) -> tensor<16xf32> { - // CHECK: "vhlo.sort_v1"(%arg0) <{ + // CHECK: "vhlo.sort_v1"(%[[ARG0]]) <{ // CHECK-SAME: dimension = #vhlo.integer_v1<0 : i64> // CHECK-SAME: is_stable = #vhlo.bool_v1 // CHECK-SAME: }> ({ @@ -2105,29 +2257,33 @@ func.func @op_sort(%arg0: 
tensor<16xf32>) -> tensor<16xf32> { } // CHECK-LABEL: "op_sqrt" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_sqrt(%arg0: tensor) -> tensor { - // CHECK: "vhlo.sqrt_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.sqrt_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.sqrt"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_subtract" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_subtract(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.subtract_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.subtract_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.subtract"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_tanh" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_tanh(%arg0: tensor) -> tensor { - // CHECK: "vhlo.tanh_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.tanh_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.tanh"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_torch_index_select" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) -> tensor<2x1x5xf32> { - // CHECK: "vhlo.torch_index_select_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.torch_index_select_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: batch_dims = #vhlo.integer_v1<0 : i64> // CHECK-SAME: dim = #vhlo.integer_v1<0 : i64> // CHECK-SAME: }> : (!vhlo.tensor_v1<5x1x5x!vhlo.f32_v1>, !vhlo.tensor_v1<2x!vhlo.i32_v1>) -> !vhlo.tensor_v1<2x1x5x!vhlo.f32_v1> @@ -2139,8 +2295,9 @@ func.func @op_torch_index_select(%arg0: tensor<5x1x5xf32>, %arg1: tensor<2xi32>) } // CHECK-LABEL: "op_transpose" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { - // CHECK: 
"vhlo.transpose_v1"(%arg0) <{ + // CHECK: "vhlo.transpose_v1"(%[[ARG0]]) <{ // CHECK-SAME: permutation = #vhlo.tensor_v1 : tensor<2xi64>> // CHECK-SAME: }> : (!vhlo.tensor_v1<16x8x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x16x!vhlo.f32_v1> %0 = "stablehlo.transpose"(%arg0) { @@ -2150,8 +2307,9 @@ func.func @op_transpose(%arg0: tensor<16x8xf32>) -> tensor<8x16xf32> { } // CHECK-LABEL: "op_triangular_solve" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32>) -> tensor<16x16xf32> { - // CHECK: "vhlo.triangular_solve_v1"(%arg0, %arg1) <{ + // CHECK: "vhlo.triangular_solve_v1"(%[[ARG0]], %[[ARG1]]) <{ // CHECK-SAME: left_side = #vhlo.bool_v1, // CHECK-SAME: lower = #vhlo.bool_v1, // CHECK-SAME: transpose_a = #vhlo, @@ -2167,15 +2325,17 @@ func.func @op_triangular_solve(%arg0: tensor<16x16xf32>, %arg1: tensor<16x16xf32 } // CHECK-LABEL: "op_tuple" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_tuple(%arg0: tensor) -> tuple> { - // CHECK: "vhlo.tuple_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> + // CHECK: "vhlo.tuple_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tuple_v1> %0 = "stablehlo.tuple"(%arg0) : (tensor) -> tuple> func.return %0 : tuple> } // CHECK-LABEL: "op_unary_einsum" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { - // CHECK: "vhlo.unary_einsum_v1"(%arg0) <{ + // CHECK: "vhlo.unary_einsum_v1"(%[[ARG0]]) <{ // CHECK-SAME: einsum_config = #vhlo.string_v1<"ab->a"> // CHECK-SAME: }> : (!vhlo.tensor_v1<8x16x!vhlo.f32_v1>) -> !vhlo.tensor_v1<8x!vhlo.f32_v1> %0 = "stablehlo.unary_einsum"(%arg0) { @@ -2185,22 +2345,25 @@ func.func @op_unary_einsum(%arg0: tensor<8x16xf32>) -> tensor<8xf32> { } // CHECK-LABEL: "op_uniform_dequantize" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_uniform_dequantize(%arg0: tensor>) -> tensor { - // CHECK: "vhlo.uniform_dequantize_v1"(%arg0) : (!vhlo.tensor_v1>) -> 
!vhlo.tensor_v1 + // CHECK: "vhlo.uniform_dequantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1>) -> !vhlo.tensor_v1 %0 = "stablehlo.uniform_dequantize"(%arg0) : (tensor>) -> tensor func.return %0 : tensor } // CHECK-LABEL: "op_uniform_quantize" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_uniform_quantize(%arg0: tensor) -> tensor> { - // CHECK: "vhlo.uniform_quantize_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> + // CHECK: "vhlo.uniform_quantize_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1> %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "op_while" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @op_while(%arg0: tensor) -> tensor { - // CHECK: "vhlo.while_v1"(%arg0) ({ + // CHECK: "vhlo.while_v1"(%[[ARG0]]) ({ // CHECK-NEXT: ^[[BB:bb.*]](%[[ARG1:arg.*]]: !vhlo.tensor_v1): // CHECK-NEXT: "vhlo.return_v1"(%[[ARG1]]) : (!vhlo.tensor_v1) -> () // CHECK-NEXT: }, { @@ -2218,8 +2381,9 @@ func.func @op_while(%arg0: tensor) -> tensor { } // CHECK-LABEL: "op_xor" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.xor_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.xor_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.xor"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } @@ -2227,197 +2391,225 @@ func.func @op_xor(%arg0: tensor, %arg1: tensor) -> tensor { // ============ TYPES ============ // CHECK-LABEL: "type_i1" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i1(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.and_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.and_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.and"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return 
%0 : tensor } // CHECK-LABEL: "type_i4" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i4(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i8" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i8(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_i64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_i64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> 
!vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui4" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui4(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui8" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui8(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_ui32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: 
"type_ui64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_ui64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3FN" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3FN(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E5M2" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E5M2(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E4M3B11FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E4M3B11FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : 
(!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f8E5M2FNUZ" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f8E5M2FNUZ(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_bf16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_bf16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f16" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f16(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_f32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f32(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor 
func.return %0 : tensor } // CHECK-LABEL: "type_f64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_f64(%arg0: tensor, %arg1: tensor) -> tensor { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1, !vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.add"(%arg0, %arg1) : (tensor, tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_complex_f32" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_complex_f32(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "type_complex_f64" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_complex_f64(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "type_dynamism_ranked" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_dynamism_ranked(%arg0: tensor) -> tensor { - // CHECK: "vhlo.abs_v1"(%arg0) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 + // CHECK: "vhlo.abs_v1"(%[[ARG0]]) : (!vhlo.tensor_v1) -> !vhlo.tensor_v1 %0 = "stablehlo.abs"(%arg0) : (tensor) -> tensor func.return %0 : tensor } // CHECK-LABEL: "type_per_tensor_quantization" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}, %[[ARG1:.*]]: {{.*}}) func.func @type_per_tensor_quantization(%arg0: tensor>, %arg1: tensor>) -> tensor> { - // CHECK: "vhlo.add_v1"(%arg0, %arg1) : 
(!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG1]]) : (!vhlo.tensor_v1>, !vhlo.tensor_v1>) -> !vhlo.tensor_v1> %0 = "stablehlo.add"(%arg0, %arg1) : (tensor>, tensor>) -> tensor> func.return %0 : tensor> } // CHECK-LABEL: "type_per_axis_quantization" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_per_axis_quantization(%arg0: tensor<2x!quant.uniform>) -> tensor<2x!quant.uniform> { - // CHECK: "vhlo.add_v1"(%arg0, %arg0) : (!vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>, !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>) -> !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1> + // CHECK: "vhlo.add_v1"(%[[ARG0]], %[[ARG0]]) : (!vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>, !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1>) -> !vhlo.tensor_v1<2x!vhlo.quant_per_axis_v1> %0 = stablehlo.add %arg0, %arg0 : tensor<2x!quant.uniform> func.return %0 : tensor<2x!quant.uniform> } // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> // CHECK-LABEL: "type_token_callee" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_token_callee(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.return_v1"(%arg0) : (!vhlo.token_v1) -> () + // CHECK: "vhlo.return_v1"(%[[ARG0]]) : (!vhlo.token_v1) -> () return %arg0 : !stablehlo.token } // CHECK: function_type = #vhlo.type_v1 !vhlo.token_v1>> // CHECK-LABEL: "type_token_caller" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_token_caller(%arg0: !stablehlo.token) -> !stablehlo.token { - // CHECK: "vhlo.call_v1"(%arg0) <{callee = #vhlo.string_v1<"type_token_callee">} + // CHECK: "vhlo.call_v1"(%[[ARG0]]) <{callee = #vhlo.string_v1<"type_token_callee">} // CHECK-SAME: (!vhlo.token_v1) -> !vhlo.token_v1 %0 = func.call @type_token_callee(%arg0) : (!stablehlo.token) -> !stablehlo.token return %0 : !stablehlo.token } // CHECK-LABEL: "type_tuple" +// CHECK-NEXT: (%[[ARG0:.*]]: {{.*}}) func.func @type_tuple(%arg0: tuple>) -> tuple { %0 = "stablehlo.custom_call"(%arg0) { call_target_name = "foo"