From c3042ac99e8cb1ad45b09f6bec1eda3c5b6656d9 Mon Sep 17 00:00:00 2001
From: Akhmed Rakhmati
Date: Wed, 26 Jun 2024 16:15:34 +0000
Subject: [PATCH] #9681: set __name__ attribute for ttnn operations when fast runtime mode is disabled

---
 ttnn/ttnn/decorators.py         | 38 ++++++++++++++++++++++++---------
 ttnn/ttnn/operations/binary.py  |  2 --
 ttnn/ttnn/operations/losses.py  |  1 -
 ttnn/ttnn/operations/ternary.py |  2 --
 ttnn/ttnn/operations/unary.py   |  4 ----
 5 files changed, 28 insertions(+), 19 deletions(-)

diff --git a/ttnn/ttnn/decorators.py b/ttnn/ttnn/decorators.py
index 7b2b3fd44df..fc924ecd649 100644
--- a/ttnn/ttnn/decorators.py
+++ b/ttnn/ttnn/decorators.py
@@ -380,6 +380,10 @@ class Operation:
     is_cpp_function: bool
     is_experimental: bool

+    @property
+    def __name__(self):
+        return self.python_fully_qualified_name
+
     def __gt__(self, other):
         return self.python_fully_qualified_name < other.python_fully_qualified_name

@@ -748,6 +752,18 @@ def operation_decorator(function: callable):
         global OPERATION_TO_GOLDEN_FUNCTION
         global OPERATION_TO_FALLBACK_FUNCTION

+        is_cpp_function = hasattr(function, "__ttnn__")
+
+        python_fully_qualified_name = name
+        if is_cpp_function:
+            if doc is not None:
+                raise RuntimeError(f"Registering {name}: documentation for C++ function has to be set from C++")
+            if python_fully_qualified_name is not None:
+                raise RuntimeError(f"Registering {name}: name is not allowed for ttnn functions")
+            python_fully_qualified_name = function.python_fully_qualified_name  # Replace C++ name with python
+        elif not is_experimental:
+            logger.warning(f"{name} should be migrated to C++!")
+
         def fallback_function(*function_args, **function_kwargs):
             preprocess_inputs = preprocess_golden_function_inputs or default_preprocess_golden_function_inputs
             postprocess_outputs = postprocess_golden_function_outputs or default_postprocess_golden_function_outputs
@@ -759,19 +775,21 @@ def fallback_function(*function_args, **function_kwargs):
             return output

         if ttnn.CONFIG.enable_fast_runtime_mode:
+
+            def name_decorator(function):
+                @wraps(function)
+                def wrapper(*args, **kwargs):
+                    return function(*args, **kwargs)
+
+                return wrapper
+
+            function = name_decorator(function)
+            function.__name__ = python_fully_qualified_name
+
             OPERATION_TO_GOLDEN_FUNCTION[function] = golden_function
             OPERATION_TO_FALLBACK_FUNCTION[function] = fallback_function
-            return function
-
-        is_cpp_function = hasattr(function, "__ttnn__")
-        python_fully_qualified_name = name
-        if is_cpp_function:
-            if doc is not None:
-                raise RuntimeError(f"Registering {name}: documentation for C++ functiomn has to be set from C++")
-            if python_fully_qualified_name is not None:
-                raise RuntimeError(f"Registering {name}: name is not allowed for ttnn functions")
-            python_fully_qualified_name = function.python_fully_qualified_name  # Replace C++ name with python
+            return function

         # Wrap functions before attaching documentation to avoid errors
         if doc is not None:
diff --git a/ttnn/ttnn/operations/binary.py b/ttnn/ttnn/operations/binary.py
index 7eddd6cf465..aa23b262219 100644
--- a/ttnn/ttnn/operations/binary.py
+++ b/ttnn/ttnn/operations/binary.py
@@ -49,7 +49,6 @@ def binary_function(
         return output_tensor

     if isinstance(binary_function, ttnn.decorators.Operation):
-        binary_function.__name__ = f"ttnn.{name}"
         binary_function.decorated_function.__doc__ = doc + (
             binary_function.__doc__ if binary_function.__doc__ is not None else ""
         )
@@ -348,7 +347,6 @@ def elt_binary_function(
         return output_tensor

     if isinstance(elt_binary_function, ttnn.decorators.Operation):
-        elt_binary_function.__name__ = f"ttnn.{name}"
         elt_binary_function.decorated_function.__doc__ = f"""{name}(input_tensor_a: ttnn.Tensor, input_tensor_b: ttnn.Tensor, *, memory_config: ttnn.MemoryConfig = ttnn.DRAM_MEMORY_CONFIG) -> ttnn.Tensor

         Performs eltwise-binary {op_name} operation on two tensors :attr:`input_a` and :attr:`input_b`.
diff --git a/ttnn/ttnn/operations/losses.py b/ttnn/ttnn/operations/losses.py
index 8e296908b08..1c037307b13 100644
--- a/ttnn/ttnn/operations/losses.py
+++ b/ttnn/ttnn/operations/losses.py
@@ -79,7 +79,6 @@ def loss_function(
         return output_tensor

     if isinstance(loss_function, ttnn.decorators.Operation):
-        loss_function.__name__ = f"ttnn.{name}"
         loss_function.decorated_function.__doc__ = f"""{name}(input_tensor_a: ttnn.Tensor, input_tensor_b: ttnn.Tensor, loss_mode: str, *, memory_config: ttnn.MemoryConfig = ttnn.DRAM_MEMORY_CONFIG) -> ttnn.Tensor

         Applies {name} to :attr:`input_tensor_a` and :attr:`input_tensor_b` with loss_mode :attr:`loss_mode`.
diff --git a/ttnn/ttnn/operations/ternary.py b/ttnn/ttnn/operations/ternary.py
index 836958d3d88..217a7d1d264 100644
--- a/ttnn/ttnn/operations/ternary.py
+++ b/ttnn/ttnn/operations/ternary.py
@@ -99,7 +99,6 @@ def ternary_function(
         return output_tensor

     if isinstance(ternary_function, ttnn.decorators.Operation):
-        ternary_function.__name__ = f"ttnn.{name}"
         ternary_function.decorated_function.__doc__ = f"""{name}(input_tensor: ttnn.Tensor, input_tensor1: ttnn.Tensor, input_tensor2: ttnn.Tensor, *, memory_config: ttnn.MemoryConfig = ttnn.DRAM_MEMORY_CONFIG) -> ttnn.Tensor

         Returns tensor with the {name} of all of elements of the input tensors input, tensor1, tensor2.
@@ -221,7 +220,6 @@ def ternary_function(
         return output_tensor

     if isinstance(ternary_function, ttnn.decorators.Operation):
-        ternary_function.__name__ = f"ttnn.{(name)}"
         ternary_function.decorated_function.__doc__ = f"""{(name)}(input_tensor: ttnn.Tensor, input_tensor1: ttnn.Tensor, input_tensor2: ttnn.Tensor, parameter, *, memory_config: ttnn.MemoryConfig = ttnn.DRAM_MEMORY_CONFIG) -> ttnn.Tensor

         Performs the element-wise {op_name} of tensor1 by tensor2, multiplies the result by the scalar value and adds it to input input.
diff --git a/ttnn/ttnn/operations/unary.py b/ttnn/ttnn/operations/unary.py
index 750b79a9550..a81828cb631 100644
--- a/ttnn/ttnn/operations/unary.py
+++ b/ttnn/ttnn/operations/unary.py
@@ -260,7 +260,6 @@ def unary_function(
         return output_tensor

     if isinstance(unary_function, ttnn.decorators.Operation):
-        unary_function.__name__ = f"ttnn.{(name)}"
         unary_function.decorated_function.__doc__ = f"""{(name)}(input_tensor: ttnn.Tensor, parameter, *, memory_config: ttnn.MemoryConfig = ttnn.DRAM_MEMORY_CONFIG) -> ttnn.Tensor

         Applies the {name} function to the elements of the input tensor :attr:`input_tensor` with :attr:`{param}` parameter.
@@ -346,7 +345,6 @@ def activation_function(
         return output_tensor

     if isinstance(activation_function, ttnn.decorators.Operation):
-        activation_function.__name__ = f"ttnn.{(name)}"
         activation_function.decorated_function.__doc__ = f"""{(name)}(input_tensor: ttnn.Tensor, parameter, *, memory_config: ttnn.MemoryConfig = ttnn.DRAM_MEMORY_CONFIG) -> ttnn.Tensor

         Applies the {name} function to the elements of the input tensor :attr:`input_tensor` with :attr:`{param}` parameter.
@@ -430,7 +428,6 @@ def activation_function(
         return output_tensor

     if isinstance(activation_function, ttnn.decorators.Operation):
-        activation_function.__name__ = f"ttnn.{(name)}"
         activation_function.decorated_function.__doc__ = f"""{(name)}(input_tensor: ttnn.Tensor, parameter, *, memory_config: ttnn.MemoryConfig = ttnn.DRAM_MEMORY_CONFIG) -> ttnn.Tensor

         Applies the {name} function to the elements of the input tensor :attr:`input_tensor` with :attr:`{param1_name}` and :attr:`{param2_name}` parameters.
@@ -550,7 +547,6 @@ def activation_function(
         return output_tensor

     if isinstance(activation_function, ttnn.decorators.Operation):
-        activation_function.__name__ = f"ttnn.{(name)}"
         activation_function.decorated_function.__doc__ = f"""{(name)}(input_tensor: ttnn.Tensor, dim: int = -1, *, memory_config: ttnn.MemoryConfig = ttnn.DRAM_MEMORY_CONFIG) -> ttnn.Tensor

         Applies the {name} function to the elements of the input tensor :attr:`input_tensor` split along :attr:`{param}`.
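
Note (reviewer annotation, not part of the patch): below is a minimal, self-contained sketch of the two naming techniques the patch uses, so the intent is visible without reading all of decorators.py. The Operation class, with_name, and _add here are simplified hypothetical stand-ins, not the real ttnn code, and the rationale that wrapping is needed because the underlying callable (e.g. a C++ binding) may reject attribute assignment is an inference, not something the patch states.

    # Minimal sketch of the patch's two techniques (hypothetical stand-ins).
    from dataclasses import dataclass
    from functools import wraps
    from typing import Callable


    @dataclass
    class Operation:
        """Stand-in for ttnn.decorators.Operation: a callable wrapper object."""

        python_fully_qualified_name: str
        function: Callable

        @property
        def __name__(self):
            # Technique 1: a read-only property makes tools that expect
            # `callable.__name__` work on Operation instances, replacing the
            # per-call-site assignments (`unary_function.__name__ = ...`)
            # that the patch deletes from binary/losses/ternary/unary.py.
            return self.python_fully_qualified_name

        def __call__(self, *args, **kwargs):
            return self.function(*args, **kwargs)


    def with_name(function: Callable, python_fully_qualified_name: str) -> Callable:
        # Technique 2 (fast runtime mode): the raw function is returned
        # instead of an Operation, so it is first wrapped in a pure-Python
        # function whose __name__ is freely assignable -- presumably because
        # the underlying callable may not accept attribute assignment.
        @wraps(function)
        def wrapper(*args, **kwargs):
            return function(*args, **kwargs)

        wrapper.__name__ = python_fully_qualified_name
        return wrapper


    def _add(a, b):  # hypothetical operation body
        return a + b


    op = Operation("ttnn.add", _add)
    assert op.__name__ == "ttnn.add" and op(1, 2) == 3

    fast_op = with_name(_add, "ttnn.add")
    assert fast_op.__name__ == "ttnn.add" and fast_op(1, 2) == 3

Either way, registered operations report their fully qualified ttnn name regardless of whether fast runtime mode returns the wrapped function or the Operation object.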