Remove ARCH_MODEL_MAP from tests (#1458)
baskrahmer authored Oct 17, 2023
1 parent 314f31a commit e7bd60d
Showing 2 changed files with 20 additions and 34 deletions.
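The change itself is mechanical: every affected test previously resolved its checkpoint through an ARCH_MODEL_MAP override that fell back to MODEL_NAMES, but the map was always empty (it carried a "# TODO remove" comment), so the lookup collapses to a direct MODEL_NAMES[model_arch] access. Below is a minimal sketch of that equivalence; the MODEL_NAMES entry is hypothetical and not taken from the repository.

# Minimal sketch of the lookup being simplified (hypothetical entry, not the real test mapping).
MODEL_NAMES = {"bert": "tiny-random-bert"}
ARCH_MODEL_MAP = {}  # was always empty in the test suite

model_arch = "bert"

# Old form: prefer an ARCH_MODEL_MAP override, otherwise fall back to MODEL_NAMES.
old_model_id = (
    ARCH_MODEL_MAP[model_arch]
    if model_arch in ARCH_MODEL_MAP
    else MODEL_NAMES[model_arch]
)

# New form after this commit: read MODEL_NAMES directly.
new_model_id = MODEL_NAMES[model_arch]

assert old_model_id == new_model_id  # identical whenever the override map is empty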
46 changes: 20 additions & 26 deletions tests/onnxruntime/test_modeling.py
@@ -1244,8 +1244,6 @@ class ORTModelForMaskedLMIntegrationTest(ORTModelTestMixin):
"xlm_roberta",
]

- ARCH_MODEL_MAP = {}  # TODO remove
-
FULL_GRID = {"model_arch": SUPPORTED_ARCHITECTURES}
ORTMODEL_CLASS = ORTModelForMaskedLM
TASK = "fill-mask"
@@ -1261,7 +1259,7 @@ def test_compare_to_transformers(self, model_arch):
model_args = {"test_name": model_arch, "model_arch": model_arch}
self._setup(model_args)

- model_id = self.ARCH_MODEL_MAP[model_arch] if model_arch in self.ARCH_MODEL_MAP else MODEL_NAMES[model_arch]
+ model_id = MODEL_NAMES[model_arch]
onnx_model = ORTModelForMaskedLM.from_pretrained(self.onnx_model_dirs[model_arch])

self.assertIsInstance(onnx_model.model, onnxruntime.InferenceSession)
@@ -1293,7 +1291,7 @@ def test_pipeline_ort_model(self, model_arch):
model_args = {"test_name": model_arch, "model_arch": model_arch}
self._setup(model_args)

- model_id = self.ARCH_MODEL_MAP[model_arch] if model_arch in self.ARCH_MODEL_MAP else MODEL_NAMES[model_arch]
+ model_id = MODEL_NAMES[model_arch]
onnx_model = ORTModelForMaskedLM.from_pretrained(self.onnx_model_dirs[model_arch])
tokenizer = get_preprocessor(model_id)
pipe = pipeline("fill-mask", model=onnx_model, tokenizer=tokenizer)
@@ -1324,7 +1322,7 @@ def test_pipeline_on_gpu(self, model_arch):
model_args = {"test_name": model_arch, "model_arch": model_arch}
self._setup(model_args)

- model_id = self.ARCH_MODEL_MAP[model_arch] if model_arch in self.ARCH_MODEL_MAP else MODEL_NAMES[model_arch]
+ model_id = MODEL_NAMES[model_arch]
onnx_model = ORTModelForMaskedLM.from_pretrained(self.onnx_model_dirs[model_arch])
tokenizer = get_preprocessor(model_id)
MASK_TOKEN = tokenizer.mask_token
@@ -1346,7 +1344,7 @@ def test_compare_to_io_binding(self, model_arch):
model_args = {"test_name": model_arch, "model_arch": model_arch}
self._setup(model_args)

- model_id = self.ARCH_MODEL_MAP[model_arch] if model_arch in self.ARCH_MODEL_MAP else MODEL_NAMES[model_arch]
+ model_id = MODEL_NAMES[model_arch]
onnx_model = ORTModelForMaskedLM.from_pretrained(self.onnx_model_dirs[model_arch], use_io_binding=False).to(
"cuda"
)
@@ -1403,8 +1401,6 @@ class ORTModelForSequenceClassificationIntegrationTest(ORTModelTestMixin):
"xlm_roberta",
]

- ARCH_MODEL_MAP = {}  # TODO remove
-
FULL_GRID = {"model_arch": SUPPORTED_ARCHITECTURES}
ORTMODEL_CLASS = ORTModelForSequenceClassification
TASK = "text-classification"
@@ -1420,7 +1416,7 @@ def test_compare_to_transformers(self, model_arch):
model_args = {"test_name": model_arch, "model_arch": model_arch}
self._setup(model_args)

- model_id = self.ARCH_MODEL_MAP[model_arch] if model_arch in self.ARCH_MODEL_MAP else MODEL_NAMES[model_arch]
+ model_id = MODEL_NAMES[model_arch]
onnx_model = ORTModelForSequenceClassification.from_pretrained(self.onnx_model_dirs[model_arch])

self.assertIsInstance(onnx_model.model, onnxruntime.InferenceSession)
@@ -1452,7 +1448,7 @@ def test_pipeline_ort_model(self, model_arch):
model_args = {"test_name": model_arch, "model_arch": model_arch}
self._setup(model_args)

- model_id = self.ARCH_MODEL_MAP[model_arch] if model_arch in self.ARCH_MODEL_MAP else MODEL_NAMES[model_arch]
+ model_id = MODEL_NAMES[model_arch]
onnx_model = ORTModelForSequenceClassification.from_pretrained(self.onnx_model_dirs[model_arch])
tokenizer = get_preprocessor(model_id)
pipe = pipeline("text-classification", model=onnx_model, tokenizer=tokenizer)
@@ -1489,7 +1485,7 @@ def test_pipeline_on_gpu(self, test_name: str, model_arch: str, provider: str):
model_args = {"test_name": model_arch, "model_arch": model_arch}
self._setup(model_args)

- model_id = self.ARCH_MODEL_MAP[model_arch] if model_arch in self.ARCH_MODEL_MAP else MODEL_NAMES[model_arch]
+ model_id = MODEL_NAMES[model_arch]
onnx_model = ORTModelForSequenceClassification.from_pretrained(
self.onnx_model_dirs[model_arch], provider=provider
)
@@ -1529,7 +1525,7 @@ def test_compare_to_io_binding(self, model_arch):
model_args = {"test_name": model_arch, "model_arch": model_arch}
self._setup(model_args)

- model_id = self.ARCH_MODEL_MAP[model_arch] if model_arch in self.ARCH_MODEL_MAP else MODEL_NAMES[model_arch]
+ model_id = MODEL_NAMES[model_arch]
onnx_model = ORTModelForSequenceClassification.from_pretrained(
self.onnx_model_dirs[model_arch], use_io_binding=False
).to("cuda")
@@ -2377,8 +2373,6 @@ class ORTModelForImageClassificationIntegrationTest(ORTModelTestMixin):
"vit",
]

- ARCH_MODEL_MAP = {}  # TODO remove
-
FULL_GRID = {"model_arch": SUPPORTED_ARCHITECTURES}
ORTMODEL_CLASS = ORTModelForImageClassification
TASK = "image-classification"
@@ -2394,7 +2388,7 @@ def test_compare_to_transformers(self, model_arch):
model_args = {"test_name": model_arch, "model_arch": model_arch}
self._setup(model_args)

- model_id = MODEL_NAMES[model_arch] if model_arch in MODEL_NAMES else self.ARCH_MODEL_MAP[model_arch]
+ model_id = MODEL_NAMES[model_arch]
onnx_model = ORTModelForImageClassification.from_pretrained(self.onnx_model_dirs[model_arch])

self.assertIsInstance(onnx_model.model, onnxruntime.InferenceSession)
@@ -2428,7 +2422,7 @@ def test_pipeline_ort_model(self, model_arch):
model_args = {"test_name": model_arch, "model_arch": model_arch}
self._setup(model_args)

- model_id = self.ARCH_MODEL_MAP[model_arch] if model_arch in self.ARCH_MODEL_MAP else MODEL_NAMES[model_arch]
+ model_id = MODEL_NAMES[model_arch]
onnx_model = ORTModelForImageClassification.from_pretrained(self.onnx_model_dirs[model_arch])
preprocessor = get_preprocessor(model_id)
pipe = pipeline("image-classification", model=onnx_model, feature_extractor=preprocessor)
@@ -2465,7 +2459,7 @@ def test_pipeline_on_gpu(self, test_name: str, model_arch: str, provider: str):
model_args = {"test_name": model_arch, "model_arch": model_arch}
self._setup(model_args)

- model_id = self.ARCH_MODEL_MAP[model_arch] if model_arch in self.ARCH_MODEL_MAP else MODEL_NAMES[model_arch]
+ model_id = MODEL_NAMES[model_arch]
onnx_model = ORTModelForImageClassification.from_pretrained(
self.onnx_model_dirs[model_arch], provider=provider
)
@@ -2489,7 +2483,7 @@ def test_compare_to_io_binding(self, model_arch):
model_args = {"test_name": model_arch, "model_arch": model_arch}
self._setup(model_args)

- model_id = self.ARCH_MODEL_MAP[model_arch] if model_arch in self.ARCH_MODEL_MAP else MODEL_NAMES[model_arch]
+ model_id = MODEL_NAMES[model_arch]
onnx_model = ORTModelForImageClassification.from_pretrained(
self.onnx_model_dirs[model_arch], use_io_binding=False
).to("cuda")
@@ -2689,7 +2683,7 @@ def test_compare_to_transformers(self, model_arch):
model_args = {"test_name": model_arch, "model_arch": model_arch}
self._setup(model_args)

- model_id = self.ARCH_MODEL_MAP[model_arch] if model_arch in self.ARCH_MODEL_MAP else MODEL_NAMES[model_arch]
+ model_id = MODEL_NAMES[model_arch]
onnx_model = ORTModelForAudioClassification.from_pretrained(self.onnx_model_dirs[model_arch])

self.assertIsInstance(onnx_model.model, onnxruntime.InferenceSession)
@@ -2721,7 +2715,7 @@ def test_pipeline_ort_model(self, model_arch):
model_args = {"test_name": model_arch, "model_arch": model_arch}
self._setup(model_args)

- model_id = self.ARCH_MODEL_MAP[model_arch] if model_arch in self.ARCH_MODEL_MAP else MODEL_NAMES[model_arch]
+ model_id = MODEL_NAMES[model_arch]
onnx_model = ORTModelForAudioClassification.from_pretrained(self.onnx_model_dirs[model_arch])
processor = AutoFeatureExtractor.from_pretrained(model_id)
pipe = pipeline("audio-classification", model=onnx_model, feature_extractor=processor, sampling_rate=220)
@@ -2759,7 +2753,7 @@ def test_pipeline_on_gpu(self, test_name: str, model_arch: str, provider: str):
model_args = {"test_name": model_arch, "model_arch": model_arch}
self._setup(model_args)

- model_id = self.ARCH_MODEL_MAP[model_arch] if model_arch in self.ARCH_MODEL_MAP else MODEL_NAMES[model_arch]
+ model_id = MODEL_NAMES[model_arch]
onnx_model = ORTModelForAudioClassification.from_pretrained(
self.onnx_model_dirs[model_arch], provider=provider
)
@@ -2782,7 +2776,7 @@ def test_compare_to_io_binding(self, model_arch):
model_args = {"test_name": model_arch, "model_arch": model_arch}
self._setup(model_args)

- model_id = self.ARCH_MODEL_MAP[model_arch] if model_arch in self.ARCH_MODEL_MAP else MODEL_NAMES[model_arch]
+ model_id = MODEL_NAMES[model_arch]
onnx_model = ORTModelForAudioClassification.from_pretrained(
self.onnx_model_dirs[model_arch], use_io_binding=False
).to("cuda")
@@ -2841,7 +2835,7 @@ def test_compare_to_transformers(self, model_arch):
model_args = {"test_name": model_arch, "model_arch": model_arch}
self._setup(model_args)

- model_id = self.ARCH_MODEL_MAP[model_arch] if model_arch in self.ARCH_MODEL_MAP else MODEL_NAMES[model_arch]
+ model_id = MODEL_NAMES[model_arch]
onnx_model = ORTModelForCTC.from_pretrained(self.onnx_model_dirs[model_arch])

self.assertIsInstance(onnx_model.model, onnxruntime.InferenceSession)
@@ -2900,7 +2894,7 @@ def test_compare_to_transformers(self, model_arch):
model_args = {"test_name": model_arch, "model_arch": model_arch}
self._setup(model_args)

- model_id = self.ARCH_MODEL_MAP[model_arch] if model_arch in self.ARCH_MODEL_MAP else MODEL_NAMES[model_arch]
+ model_id = MODEL_NAMES[model_arch]
onnx_model = ORTModelForAudioXVector.from_pretrained(self.onnx_model_dirs[model_arch])

self.assertIsInstance(onnx_model.model, onnxruntime.InferenceSession)
@@ -2936,7 +2930,7 @@ def test_compare_to_io_binding(self, model_arch):
model_args = {"test_name": model_arch, "model_arch": model_arch}
self._setup(model_args)

- model_id = self.ARCH_MODEL_MAP[model_arch] if model_arch in self.ARCH_MODEL_MAP else MODEL_NAMES[model_arch]
+ model_id = MODEL_NAMES[model_arch]
onnx_model = ORTModelForAudioXVector.from_pretrained(
self.onnx_model_dirs[model_arch], use_io_binding=False
).to("cuda")
@@ -2992,7 +2986,7 @@ def test_compare_to_transformers(self, model_arch):
model_args = {"test_name": model_arch, "model_arch": model_arch}
self._setup(model_args)

- model_id = self.ARCH_MODEL_MAP[model_arch] if model_arch in self.ARCH_MODEL_MAP else MODEL_NAMES[model_arch]
+ model_id = MODEL_NAMES[model_arch]
onnx_model = ORTModelForAudioFrameClassification.from_pretrained(self.onnx_model_dirs[model_arch])

self.assertIsInstance(onnx_model.model, onnxruntime.InferenceSession)
8 changes: 0 additions & 8 deletions tests/onnxruntime/utils_onnxruntime_tests.py
@@ -117,8 +117,6 @@


class ORTModelTestMixin(unittest.TestCase):
- ARCH_MODEL_MAP = {}
-
TENSOR_ALIAS_TO_TYPE = {
"pt": torch.Tensor,
"np": np.ndarray,
@@ -164,12 +162,6 @@ def _setup(self, model_args: Dict):
# The model with use_cache=True is not supported for bert as a decoder")
continue

- if model_arch in self.ARCH_MODEL_MAP:
-     if isinstance(MODEL_NAMES[model_arch], dict):
-         model_id = list(self.ARCH_MODEL_MAP[model_arch].keys())[idx]
-     else:
-         model_id = self.ARCH_MODEL_MAP[model_arch]
-
set_seed(SEED)
onnx_model = self.ORTMODEL_CLASS.from_pretrained(
model_id, **model_args, use_io_binding=False, export=True
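For context, the block deleted from _setup above was the only place an ARCH_MODEL_MAP entry could actually override the checkpoint id, and with the map permanently empty it was dead code. The following is a hedged sketch of the logic it implemented; the MODEL_NAMES entries are illustrative, not the repository's real test checkpoints.

# Sketch of the removed _setup override (illustrative entries only).
MODEL_NAMES = {
    "bert": "tiny-random-bert",  # string-valued entry (hypothetical)
    "whisper": {"tiny-random-whisper": ["automatic-speech-recognition"]},  # dict-valued entry (hypothetical)
}
ARCH_MODEL_MAP = {}  # always empty, so the branch below could never run

def resolve_override(model_arch, idx=0):
    model_id = MODEL_NAMES[model_arch]  # stand-in for the default assigned earlier in _setup
    if model_arch in ARCH_MODEL_MAP:  # never true with an empty map
        if isinstance(MODEL_NAMES[model_arch], dict):
            # For dict-valued MODEL_NAMES entries, the idx-th key of the override map was used.
            model_id = list(ARCH_MODEL_MAP[model_arch].keys())[idx]
        else:
            model_id = ARCH_MODEL_MAP[model_arch]
    return model_id

assert resolve_override("bert") == MODEL_NAMES["bert"]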
