device agnostic pipelines testing (#27129)
* device agnostic pipelines testing

* pass torch_device
statelesshz authored Oct 31, 2023
1 parent 08fadc8 commit f53041a
Showing 10 changed files with 64 additions and 58 deletions.
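The commit replaces CUDA-specific test plumbing (`require_torch_gpu`, `device=0`, `device="cuda:0"`, `torch.cuda.empty_cache()`) with the device-agnostic helpers from `transformers.testing_utils` (`require_torch_accelerator`, `torch_device`, `backend_empty_cache`). A minimal sketch of that pattern follows; the test class and body are illustrative only and are not part of this commit, though the model name and helpers appear in the diffs below.

```python
# Minimal sketch of the device-agnostic test pattern used throughout this commit.
# `torch_device`, `require_torch_accelerator` and `backend_empty_cache` come from
# transformers.testing_utils; the test itself is a hypothetical example.
import gc
import unittest

import torch
from transformers import pipeline
from transformers.testing_utils import (
    backend_empty_cache,
    require_torch_accelerator,
    slow,
    torch_device,
)


class ExampleDeviceAgnosticTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Release as much accelerator memory as possible, whatever the backend is.
        gc.collect()
        backend_empty_cache(torch_device)

    @slow
    @require_torch_accelerator  # skips when no accelerator is available
    def test_generation_fp16(self):
        # torch_device resolves to the accelerator under test ("cuda", "cpu", ...),
        # so the test no longer hard-codes device=0 or device="cuda:0".
        pipe = pipeline(
            "text-generation",
            model="hf-internal-testing/tiny-random-bloom",
            device=torch_device,
            torch_dtype=torch.float16,
        )
        pipe("This is a test")
```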
17 changes: 8 additions & 9 deletions tests/pipelines/test_pipelines_automatic_speech_recognition.py
@@ -39,9 +39,10 @@
require_pyctcdecode,
require_tf,
require_torch,
require_torch_gpu,
require_torch_accelerator,
require_torchaudio,
slow,
torch_device,
)

from .test_pipelines_common import ANY
@@ -166,13 +167,11 @@ def test_small_model_pt(self):
_ = speech_recognizer(waveform, return_timestamps="char")

@slow
@require_torch
@require_torch_accelerator
def test_whisper_fp16(self):
if not torch.cuda.is_available():
self.skipTest("Cuda is necessary for this test")
speech_recognizer = pipeline(
model="openai/whisper-base",
device=0,
device=torch_device,
torch_dtype=torch.float16,
)
waveform = np.tile(np.arange(1000, dtype=np.float32), 34)
@@ -904,12 +903,12 @@ def test_speech_to_text_leveraged(self):
self.assertEqual(output, {"text": "a man said to the universe sir i exist"})

@slow
@require_torch_gpu
@require_torch_accelerator
def test_wav2vec2_conformer_float16(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="facebook/wav2vec2-conformer-rope-large-960h-ft",
device="cuda:0",
device=torch_device,
torch_dtype=torch.float16,
framework="pt",
)
@@ -1304,14 +1303,14 @@ def test_stride(self):
self.assertEqual(output, {"text": "XB"})

@slow
@require_torch_gpu
@require_torch_accelerator
def test_slow_unfinished_sequence(self):
from transformers import GenerationConfig

pipe = pipeline(
"automatic-speech-recognition",
model="vasista22/whisper-hindi-large-v2",
device="cuda:0",
device=torch_device,
)
# Original model wasn't trained with timestamps and has incorrect generation config
pipe.model.generation_config = GenerationConfig.from_pretrained("openai/whisper-large-v2")
20 changes: 11 additions & 9 deletions tests/pipelines/test_pipelines_common.py
@@ -40,15 +40,17 @@
USER,
CaptureLogger,
RequestCounter,
backend_empty_cache,
is_pipeline_test,
is_staging_test,
nested_simplify,
require_tensorflow_probability,
require_tf,
require_torch,
require_torch_gpu,
require_torch_accelerator,
require_torch_or_tf,
slow,
torch_device,
)
from transformers.utils import direct_transformers_import, is_tf_available, is_torch_available
from transformers.utils import logging as transformers_logging
@@ -511,7 +513,7 @@ def test_load_default_pipelines_pt(self):

# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
backend_empty_cache(torch_device)

@slow
@require_tf
@@ -541,20 +543,20 @@ def test_load_default_pipelines_pt_table_qa(self):

# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
backend_empty_cache(torch_device)

@slow
@require_torch
@require_torch_gpu
def test_pipeline_cuda(self):
pipe = pipeline("text-generation", device="cuda")
@require_torch_accelerator
def test_pipeline_accelerator(self):
pipe = pipeline("text-generation", device=torch_device)
_ = pipe("Hello")

@slow
@require_torch
@require_torch_gpu
def test_pipeline_cuda_indexed(self):
pipe = pipeline("text-generation", device="cuda:0")
@require_torch_accelerator
def test_pipeline_accelerator_indexed(self):
pipe = pipeline("text-generation", device=torch_device)
_ = pipe("Hello")

@slow
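In `test_pipelines_common.py`, `torch.cuda.empty_cache()` becomes `backend_empty_cache(torch_device)` and the CUDA-only pipeline tests are renamed to accelerator-neutral ones. The sketch below is only an illustration of the idea behind such a dispatch helper, under the assumption that it routes on the device string; it is not the `transformers` implementation.

```python
# Illustrative sketch of a backend-dispatching cache cleaner; NOT the transformers
# implementation of backend_empty_cache, only a picture of the idea behind it.
import torch


def empty_cache_for(device: str) -> None:
    # Map a device string such as "cuda", "npu" or "cpu" to the matching
    # empty_cache call; CPU needs no cache clearing.
    if device.startswith("cuda"):
        torch.cuda.empty_cache()
    elif device.startswith("npu") and hasattr(torch, "npu"):
        # assumes the torch_npu extension is installed
        torch.npu.empty_cache()
    # other accelerators would hook in here; "cpu" is a no-op
```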
14 changes: 5 additions & 9 deletions tests/pipelines/test_pipelines_conversational.py
@@ -31,6 +31,7 @@
pipeline,
)
from transformers.testing_utils import (
backend_empty_cache,
is_pipeline_test,
is_torch_available,
require_tf,
@@ -42,19 +43,14 @@
from .test_pipelines_common import ANY


DEFAULT_DEVICE_NUM = -1 if torch_device == "cpu" else 0


@is_pipeline_test
class ConversationalPipelineTests(unittest.TestCase):
def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch

torch.cuda.empty_cache()
backend_empty_cache(torch_device)

model_mapping = dict(
list(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.items())
@@ -136,7 +132,7 @@ def run_pipeline_test(self, conversation_agent, _):
@slow
def test_integration_torch_conversation(self):
# When
conversation_agent = pipeline(task="conversational", device=DEFAULT_DEVICE_NUM)
conversation_agent = pipeline(task="conversational", device=torch_device)
conversation_1 = Conversation("Going to the movies tonight - any suggestions?")
conversation_2 = Conversation("What's the last book you have read?")
# Then
@@ -168,7 +164,7 @@ def test_integration_torch_conversation(self):
@slow
def test_integration_torch_conversation_truncated_history(self):
# When
conversation_agent = pipeline(task="conversational", min_length_for_response=24, device=DEFAULT_DEVICE_NUM)
conversation_agent = pipeline(task="conversational", min_length_for_response=24, device=torch_device)
conversation_1 = Conversation("Going to the movies tonight - any suggestions?")
# Then
self.assertEqual(len(conversation_1.past_user_inputs), 1)
@@ -374,7 +370,7 @@ def test_integration_torch_conversation_encoder_decoder(self):
# When
tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot_small-90M")
conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer, device=DEFAULT_DEVICE_NUM)
conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer, device=torch_device)

conversation_1 = Conversation("My name is Sarah and I live in London")
conversation_2 = Conversation("Going to the movies tonight, What movie would you recommend? ")
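The conversational tests drop the `DEFAULT_DEVICE_NUM = -1 if torch_device == "cpu" else 0` shim because `pipeline(..., device=...)` also accepts a device string or `torch.device`, so `torch_device` can be passed straight through on CPU and on any accelerator. A small usage sketch, not part of the commit:

```python
# Usage sketch (illustrative): pipeline() accepts an int index, a device string,
# or a torch.device, so torch_device no longer needs translating to an index.
from transformers import pipeline
from transformers.testing_utils import torch_device

# Old style: translate torch_device into an index (-1 for CPU, 0 for the first GPU).
legacy_device = -1 if torch_device == "cpu" else 0
agent_legacy = pipeline(task="conversational", device=legacy_device)

# New style: hand the device string over directly; works for "cpu", "cuda", etc.
agent = pipeline(task="conversational", device=torch_device)
```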
17 changes: 11 additions & 6 deletions tests/pipelines/test_pipelines_fill_mask.py
@@ -18,13 +18,15 @@
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
backend_empty_cache,
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
require_torch_accelerator,
slow,
torch_device,
)

from .test_pipelines_common import ANY
@@ -40,9 +42,7 @@ def tearDown(self):
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch

torch.cuda.empty_cache()
backend_empty_cache(torch_device)

@require_tf
def test_small_model_tf(self):
@@ -148,9 +148,14 @@ def test_small_model_pt(self):
],
)

@require_torch_gpu
@require_torch_accelerator
def test_fp16_casting(self):
pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")
pipe = pipeline(
"fill-mask",
model="hf-internal-testing/tiny-random-distilbert",
device=torch_device,
framework="pt",
)

# convert model to fp16
pipe.model.half()
5 changes: 1 addition & 4 deletions tests/pipelines/test_pipelines_summarization.py
@@ -27,9 +27,6 @@
from .test_pipelines_common import ANY


DEFAULT_DEVICE_NUM = -1 if torch_device == "cpu" else 0


@is_pipeline_test
class SummarizationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
@@ -106,7 +103,7 @@ def test_small_model_tf(self):
@require_torch
@slow
def test_integration_torch_summarization(self):
summarizer = pipeline(task="summarization", device=DEFAULT_DEVICE_NUM)
summarizer = pipeline(task="summarization", device=torch_device)
cnn_article = (
" (CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on"
" Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The"
6 changes: 2 additions & 4 deletions tests/pipelines/test_pipelines_text_classification.py
@@ -20,7 +20,7 @@
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow, torch_device

from .test_pipelines_common import ANY

@@ -96,13 +96,11 @@ def test_small_model_pt(self):

@require_torch
def test_accepts_torch_device(self):
import torch

text_classifier = pipeline(
task="text-classification",
model="hf-internal-testing/tiny-random-distilbert",
framework="pt",
device=torch.device("cpu"),
device=torch_device,
)

outputs = text_classifier("This is great !")
12 changes: 9 additions & 3 deletions tests/pipelines/test_pipelines_text_generation.py
@@ -27,8 +27,10 @@
require_accelerate,
require_tf,
require_torch,
require_torch_accelerator,
require_torch_gpu,
require_torch_or_tf,
torch_device,
)

from .test_pipelines_common import ANY
@@ -319,16 +321,20 @@ def test_small_model_pt_bloom_accelerate(self):
)

@require_torch
@require_torch_gpu
@require_torch_accelerator
def test_small_model_fp16(self):
import torch

pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
pipe = pipeline(
model="hf-internal-testing/tiny-random-bloom",
device=torch_device,
torch_dtype=torch.float16,
)
pipe("This is a test")

@require_torch
@require_accelerate
@require_torch_gpu
@require_torch_accelerator
def test_pipeline_accelerate_top_p(self):
import torch

7 changes: 4 additions & 3 deletions tests/pipelines/test_pipelines_text_to_audio.py
@@ -25,9 +25,10 @@
from transformers.testing_utils import (
is_pipeline_test,
require_torch,
require_torch_gpu,
require_torch_accelerator,
require_torch_or_tf,
slow,
torch_device,
)

from .test_pipelines_common import ANY
@@ -115,9 +116,9 @@ def test_small_bark_pt(self):
self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio)

@slow
@require_torch_gpu
@require_torch_accelerator
def test_conversion_additional_tensor(self):
speech_generator = pipeline(task="text-to-audio", model="suno/bark-small", framework="pt", device=0)
speech_generator = pipeline(task="text-to-audio", model="suno/bark-small", framework="pt", device=torch_device)
processor = AutoProcessor.from_pretrained("suno/bark-small")

forward_params = {
9 changes: 5 additions & 4 deletions tests/pipelines/test_pipelines_token_classification.py
@@ -30,8 +30,9 @@
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
require_torch_accelerator,
slow,
torch_device,
)

from .test_pipelines_common import ANY
@@ -391,13 +392,13 @@ def test_spanish_bert(self):
],
)

@require_torch_gpu
@require_torch_accelerator
@slow
def test_gpu(self):
def test_accelerator(self):
sentence = "This is dummy sentence"
ner = pipeline(
"token-classification",
device=0,
device=torch_device,
aggregation_strategy=AggregationStrategy.SIMPLE,
)

15 changes: 8 additions & 7 deletions tests/pipelines/test_pipelines_visual_question_answering.py
@@ -22,9 +22,10 @@
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
require_torch_accelerator,
require_vision,
slow,
torch_device,
)

from .test_pipelines_common import ANY
@@ -91,7 +92,7 @@ def test_small_model_pt(self):
)

@require_torch
@require_torch_gpu
@require_torch_accelerator
def test_small_model_pt_blip2(self):
vqa_pipeline = pipeline(
"visual-question-answering", model="hf-internal-testing/tiny-random-Blip2ForConditionalGeneration"
@@ -112,9 +113,9 @@ def test_small_model_pt_blip2(self):
"visual-question-answering",
model="hf-internal-testing/tiny-random-Blip2ForConditionalGeneration",
model_kwargs={"torch_dtype": torch.float16},
device=0,
device=torch_device,
)
self.assertEqual(vqa_pipeline.model.device, torch.device(0))
self.assertEqual(vqa_pipeline.model.device, torch.device("{}:0".format(torch_device)))
self.assertEqual(vqa_pipeline.model.language_model.dtype, torch.float16)
self.assertEqual(vqa_pipeline.model.vision_model.dtype, torch.float16)

@@ -148,15 +149,15 @@ def test_large_model_pt(self):

@slow
@require_torch
@require_torch_gpu
@require_torch_accelerator
def test_large_model_pt_blip2(self):
vqa_pipeline = pipeline(
"visual-question-answering",
model="Salesforce/blip2-opt-2.7b",
model_kwargs={"torch_dtype": torch.float16},
device=0,
device=torch_device,
)
self.assertEqual(vqa_pipeline.model.device, torch.device(0))
self.assertEqual(vqa_pipeline.model.device, torch.device("{}:0".format(torch_device)))
self.assertEqual(vqa_pipeline.model.language_model.dtype, torch.float16)

image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
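The visual-question-answering assertions change from `torch.device(0)`, which PyTorch interprets as CUDA device 0, to `torch.device("{}:0".format(torch_device))`, so the expected device is built from whichever backend the suite runs on. A short illustrative sketch of the difference, not part of the commit:

```python
# Sketch of why the assertion was rewritten: an integer index always implies CUDA,
# while formatting with torch_device keeps the check backend-agnostic.
import torch

torch_device = "cuda"  # stand-in for transformers.testing_utils.torch_device

assert torch.device(0) == torch.device("cuda:0")  # integer index implies CUDA
assert torch.device("{}:0".format(torch_device)) == torch.device("cuda:0")
# On another accelerator the same expression would yield e.g. torch.device("npu:0").
```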
