Skip to content

Commit

Permalink
Merge branch 'int8' of https://github.com/TimDettmers/bitsandbytes into int8
Browse files Browse the repository at this point in the history
  • Loading branch information
matthewdouglas committed Nov 5, 2024
2 parents a72c463 + 980279f commit b5d6135
Showing 1 changed file with 9 additions and 5 deletions.
14 changes: 9 additions & 5 deletions bitsandbytes/functional.py
Original file line number Diff line number Diff line change
Expand Up @@ -2223,7 +2223,7 @@ def batched_igemm(


@deprecated(
"igemmlt is deprecated and will be removed in a future release. " "Please use int8_linear_matmul instead.",
"igemmlt is deprecated and will be removed in a future release. Please use int8_linear_matmul instead.",
category=FutureWarning,
)
def igemmlt(
Expand Down Expand Up @@ -2365,9 +2365,9 @@ def mm_dequant(

def get_colrow_absmax(
A: torch.Tensor,
row_stats: torch.Tensor = None,
col_stats: torch.Tensor = None,
nnz_block_ptr: torch.Tensor = None,
row_stats: Optional[torch.Tensor] = None,
col_stats: Optional[torch.Tensor] = None,
nnz_block_ptr: Optional[torch.Tensor] = None,
threshold=0.0,
):
# Note: prior impl only works with fp16
Expand Down Expand Up @@ -2614,7 +2614,11 @@ def transform(A, to_order, from_order="row", out=None, transpose=False, state=No
return out, new_state


def spmm_coo(cooA: Union[COOSparseTensor, torch.Tensor], B: torch.Tensor, out: torch.Tensor = None):
def spmm_coo(
cooA: Union[COOSparseTensor, torch.Tensor],
B: torch.Tensor,
out: Optional[torch.Tensor] = None,
):
if not isinstance(cooA, COOSparseTensor):
assert (
cooA.is_sparse and cooA.layout == torch.sparse_coo
Expand Down

0 comments on commit b5d6135

Please sign in to comment.