From 20c2d98747a53ccd4a5413510fbcbb0c48d8e846 Mon Sep 17 00:00:00 2001
From: eaidova
Date: Fri, 20 Dec 2024 10:43:40 +0400
Subject: [PATCH 1/2] update optimum version for having latest fixes

---
 setup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/setup.py b/setup.py
index d9b3b8642..f78052b4b 100644
--- a/setup.py
+++ b/setup.py
@@ -28,7 +28,7 @@
 
 INSTALL_REQUIRE = [
     "torch>=1.11",
-    "optimum~=1.23",
+    "optimum@git+https://github.com/huggingface/optimum.git",
     "transformers>=4.36,<4.47",
     "datasets>=1.4.0",
     "sentencepiece",

From 3444cb4d83b9ba2c1c2f2d7f08fd4ef37067ca21 Mon Sep 17 00:00:00 2001
From: eaidova
Date: Fri, 20 Dec 2024 10:49:52 +0400
Subject: [PATCH 2/2] update imports

---
 optimum/exporters/openvino/convert.py  | 2 +-
 optimum/intel/openvino/quantization.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/optimum/exporters/openvino/convert.py b/optimum/exporters/openvino/convert.py
index 66e6c13a2..432e52d0c 100644
--- a/optimum/exporters/openvino/convert.py
+++ b/optimum/exporters/openvino/convert.py
@@ -365,7 +365,7 @@ def export_pytorch(
     import torch
     from torch.utils._pytree import tree_map
 
-    from optimum.exporters.onnx.convert import check_dummy_inputs_are_allowed
+    from optimum.exporters.utils import check_dummy_inputs_are_allowed
 
     logger.info(f"Using framework PyTorch: {torch.__version__}")
     output = Path(output)
diff --git a/optimum/intel/openvino/quantization.py b/optimum/intel/openvino/quantization.py
index 75dab9366..6cb6e60ce 100644
--- a/optimum/intel/openvino/quantization.py
+++ b/optimum/intel/openvino/quantization.py
@@ -523,7 +523,7 @@ def _quantize_torchmodel(
         quantization_config = ov_config.quantization_config
         if isinstance(quantization_config, OVWeightQuantizationConfig):
-            from optimum.exporters.onnx.convert import check_dummy_inputs_are_allowed
+            from optimum.exporters.utils import check_dummy_inputs_are_allowed
 
             if stateful:
                 # patch model before weight compression
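
Note (not part of the patch series above): the import change assumes an optimum version in which check_dummy_inputs_are_allowed already lives in optimum.exporters.utils, which is why the first patch points the optimum dependency at the git main branch instead of the ~=1.23 release. If compatibility with older optimum releases were ever needed, a guarded import is one possible approach; the sketch below is hypothetical and only reuses the two import paths shown in the diffs.

    # Hypothetical compatibility shim, not part of this patch series:
    # prefer the new location of check_dummy_inputs_are_allowed and fall back
    # to the pre-move path for optimum releases that still expose it there.
    try:
        from optimum.exporters.utils import check_dummy_inputs_are_allowed
    except ImportError:
        from optimum.exporters.onnx.convert import check_dummy_inputs_are_allowed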