diff --git a/ivy/functional/frontends/torch/tensor.py b/ivy/functional/frontends/torch/tensor.py
index a316e50b4b0dc..d9a2b6dc34599 100644
--- a/ivy/functional/frontends/torch/tensor.py
+++ b/ivy/functional/frontends/torch/tensor.py
@@ -119,6 +119,10 @@ def get_device(self):
         else:
             return int(self.device.split(":")[-1])
 
+    @property
+    def itemsize(self):
+        return self.element_size()
+
     # Setters #
     # --------#
 
@@ -721,6 +725,12 @@ def detach_(self):
         self.ivy_array = ivy.inplace_update(self.ivy_array, ret.ivy_array)
         return self
 
+    def cpu(self):
+        return ivy.to_device(self.ivy_array, "cpu")
+
+    def cuda(self):
+        return ivy.to_device(self.ivy_array, "gpu:0")
+
     @with_unsupported_dtypes({"2.2 and below": ("uint16",)}, "torch")
     @numpy_to_torch_style_args
     def unsqueeze(self, dim):
@@ -1543,6 +1553,10 @@ def item(self):
             "only one element tensors can be converted to Python scalars"
         )
 
+    def element_size(self):
+        dtype = ivy.dtype(self.ivy_array)
+        return int(ivy.dtype_bits(dtype) // 8)
+
     @numpy_to_torch_style_args
     @with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
     def cumprod(self, dim, dtype=None):