From 9d8adca3a19d25d6dd87790db64eabb4b3f26bf0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A9lix=20Marty?= <9808326+fxmarty@users.noreply.github.com> Date: Thu, 21 Sep 2023 11:09:19 +0200 Subject: [PATCH] fix provider availability on ORT 1.16.0 release --- optimum/onnxruntime/utils.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/optimum/onnxruntime/utils.py b/optimum/onnxruntime/utils.py index 9a170fa3a0d..4152febb2d7 100644 --- a/optimum/onnxruntime/utils.py +++ b/optimum/onnxruntime/utils.py @@ -22,6 +22,7 @@ import numpy as np import torch +from packaging import version from transformers.utils import logging import onnxruntime as ort @@ -218,8 +219,13 @@ def validate_provider_availability(provider: str): Args: provider (str): Name of an ONNX Runtime execution provider. """ - # disable on Windows as reported in https://github.com/huggingface/optimum/issues/769 - if os.name != "nt" and provider in ["CUDAExecutionProvider", "TensorrtExecutionProvider"]: + # Disable on Windows as reported in https://github.com/huggingface/optimum/issues/769. + # Disable as well for ORT 1.16.0 that has changed the way _ld_preload.py is filled: https://github.com/huggingface/optimum/issues/1402. + if ( + version.parse(ort.__version__) < version.parse("1.16.0") + and os.name != "nt" + and provider in ["CUDAExecutionProvider", "TensorrtExecutionProvider"] + ): path_cuda_lib = os.path.join(ort.__path__[0], "capi", "libonnxruntime_providers_cuda.so") path_trt_lib = os.path.join(ort.__path__[0], "capi", "libonnxruntime_providers_tensorrt.so") path_dependecy_loading = os.path.join(ort.__path__[0], "capi", "_ld_preload.py")