From 553cecb0a3e946d603a4edb1dad04662dbcf7b4e Mon Sep 17 00:00:00 2001 From: SUN Haibo Date: Fri, 26 Jan 2024 18:58:46 -0500 Subject: [PATCH] Update functional.py --- bitsandbytes/functional.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bitsandbytes/functional.py b/bitsandbytes/functional.py index 0b18d9b06..a461d1749 100644 --- a/bitsandbytes/functional.py +++ b/bitsandbytes/functional.py @@ -890,7 +890,7 @@ def quantize_fp4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize def quantize_nf4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_storage=torch.uint8): return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'nf4', quant_storage) -def quantize_4bit(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_type='fp4', quant_storage=torch.uint8) -> Tensor: +def quantize_4bit(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_type='fp4', quant_storage=torch.uint8) -> Tuple[Tensor, QuantState]: """ Quantize tensor A in blocks of 4-bit values.