From 1c3ad5c944049bf56b25683114326810b26978f8 Mon Sep 17 00:00:00 2001
From: Sara Adkins
Date: Tue, 3 Sep 2024 18:33:09 +0000
Subject: [PATCH] revert changes not needed for compression

---
 src/transformers/modeling_utils.py                 | 2 +-
 .../quantizers/quantizer_compressed_tensors.py     | 8 --------
 2 files changed, 1 insertion(+), 9 deletions(-)

diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py
index 125dd629f4cc5e..75ed37904d7a04 100755
--- a/src/transformers/modeling_utils.py
+++ b/src/transformers/modeling_utils.py
@@ -4035,7 +4035,7 @@ def from_pretrained(
             dispatch_model(model, **device_map_kwargs)
 
         if hf_quantizer is not None:
-            hf_quantizer.postprocess_model(model, resolved_archive_file=resolved_archive_file)
+            hf_quantizer.postprocess_model(model)
             model.hf_quantizer = hf_quantizer
 
         if _adapter_model_path is not None:
diff --git a/src/transformers/quantizers/quantizer_compressed_tensors.py b/src/transformers/quantizers/quantizer_compressed_tensors.py
index 7bde1feca7c14a..8381f3d4165c87 100644
--- a/src/transformers/quantizers/quantizer_compressed_tensors.py
+++ b/src/transformers/quantizers/quantizer_compressed_tensors.py
@@ -68,14 +68,6 @@ def _is_compressed_key(key: str) -> bool:
 
         return [key for key in unexpected_keys if not _is_compressed_key(key)]
 
-    def update_missing_keys(self, model, missing_keys: List[str], prefix: str) -> List[str]:
-        def _is_decompressed_key(key: str) -> bool:
-            # key names in decompressed state dict that will not be present in
-            # a compressed state dict
-            return key.endswith("weight") or "scale" in key or "zero_point" in key
-
-        return [key for key in missing_keys if not _is_decompressed_key(key)]
-
     def _process_model_before_weight_loading(self, model, **kwargs):
         from compressed_tensors.quantization import apply_quantization_config
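
For reference, the reverted update_missing_keys override filtered out state-dict key names that exist only in a decompressed checkpoint. Below is a minimal standalone sketch of that filter using the predicate from the removed lines; the wrapper function name and the sample key names are illustrative and not taken from the library.

from typing import List


def _is_decompressed_key(key: str) -> bool:
    # Key names present in a decompressed state dict but absent from a
    # compressed one: plain weights plus quantization scale / zero_point tensors
    # (predicate copied from the removed lines above).
    return key.endswith("weight") or "scale" in key or "zero_point" in key


def filter_missing_keys(missing_keys: List[str]) -> List[str]:
    # Hypothetical standalone wrapper mirroring the removed update_missing_keys
    # hook: drop keys that a compressed checkpoint is not expected to contain.
    return [key for key in missing_keys if not _is_decompressed_key(key)]


# Illustrative key names only.
keys = [
    "model.layers.0.self_attn.q_proj.weight",
    "model.layers.0.self_attn.q_proj.weight_scale",
    "model.layers.0.self_attn.q_proj.weight_zero_point",
    "model.layers.0.input_layernorm.bias",
]
print(filter_missing_keys(keys))  # -> ['model.layers.0.input_layernorm.bias']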