
Commit

Replace quantize
alanwaketan committed Feb 2, 2024
1 parent e3b9ce1 · commit bb075e3
Showing 1 changed file with 6 additions and 7 deletions.
torch_xla/experimental/quantized.py (6 additions, 7 deletions)
@@ -1,20 +1,19 @@
 import numpy as np
 import torch
+from torch.library import impl
 import torch_xla
-from torch.library import Library, impl
-
-quantized_decomposed_lib = Library("quantized_decomposed", "IMPL")
+from torch_xla.core.xla_model import XLA_LIB
 
 
-@impl(quantized_decomposed_lib, "quantize_per_tensor", "XLA")
+@impl(XLA_LIB, "quantize_per_tensor", "XLA")
 def xla_quantize_per_tensor(input: torch.Tensor, scale: float, zero_point: int,
                             quant_min: int, quant_max: int, dtype: torch.dtype):
   return _xla_quantize(input, torch.tensor([scale]),
                        torch.tensor([zero_point], dtype=dtype), quant_min,
                        quant_max, dtype)
 
 
-@impl(quantized_decomposed_lib, "quantize_per_channel", "XLA")
+@impl(XLA_LIB, "quantize_per_channel", "XLA")
 def xla_quantize_per_channel(input: torch.Tensor, scale: torch.Tensor,
                              zero_point: torch.Tensor, axis: int,
                              quant_min: int, quant_max: int,
@@ -23,7 +22,7 @@ def xla_quantize_per_channel(input: torch.Tensor, scale: torch.Tensor,
                        axis)
 
 
-@impl(quantized_decomposed_lib, "dequantize_per_tensor", "XLA")
+@impl(XLA_LIB, "dequantize_per_tensor", "XLA")
 def xla_dequantize_per_tensor(input: torch.Tensor, scale: float,
                               zero_point: int, quant_min: int, quant_max: int,
                               dtype: torch.dtype):
@@ -32,7 +31,7 @@ def xla_dequantize_per_tensor(input: torch.Tensor, scale: float,
                          quant_max, dtype)
 
 
-@impl(quantized_decomposed_lib, "dequantize_per_channel", "XLA")
+@impl(XLA_LIB, "dequantize_per_channel", "XLA")
 def xla_dequantize_per_tensor(input: torch.Tensor, scale: torch.Tensor,
                               zero_point: torch.Tensor, axis: int,
                               quant_min: int, quant_max: int,
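For readers unfamiliar with the mechanism in this diff: torch.library lets Python code attach backend kernels to PyTorch operators. A Library handle owns (or connects to) an operator namespace, and the impl decorator registers a function as an operator's kernel for a given dispatch key ("XLA" here). The deleted Library("quantized_decomposed", "IMPL") handle used the "IMPL" kind, which can only attach kernels to operators already defined elsewhere (the quantized_decomposed ops are defined in PyTorch core); after this commit the registrations reuse the shared XLA_LIB handle exported by torch_xla.core.xla_model. The sketch below shows the general define-then-impl pattern on a hypothetical namespace and operator (my_ns::add_one) under the CPU dispatch key so it runs without an XLA device; it illustrates the API only and is not part of the commit.

import torch
from torch.library import Library, impl

# Hypothetical namespace and operator, for illustration only.
my_lib = Library("my_ns", "DEF")  # "DEF" permits defining new operators
my_lib.define("add_one(Tensor x) -> Tensor")


@impl(my_lib, "add_one", "CPU")  # attach a CPU kernel to my_ns::add_one
def add_one_cpu(x: torch.Tensor) -> torch.Tensor:
  return x + 1


print(torch.ops.my_ns.add_one(torch.zeros(3)))  # tensor([1., 1., 1.])

One consequence of the swap worth noting: impl registers a kernel under the namespace of whatever handle it receives, so moving from quantized_decomposed_lib to XLA_LIB also moves these XLA kernels into the namespace XLA_LIB owns, presumably backed by matching define() calls elsewhere in torch_xla.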
