
Commit

Add draft progress bar for 3 tasks
boyleconnor committed Feb 16, 2024
1 parent 8dfe057 commit 9913b9c
Showing 1 changed file with 15 additions and 2 deletions.
17 changes: 15 additions & 2 deletions src/evaluate/evaluator/base.py
@@ -20,6 +20,7 @@
 from datasets import Dataset, load_dataset
 
 from evaluate.evaluator.utils import choose_split
+from tqdm import tqdm
 
 
 try:
@@ -510,9 +511,21 @@ def prepare_metric(self, metric: Union[str, EvaluationModule]):
 
     def call_pipeline(self, pipe, *args, **kwargs):
         start_time = perf_counter()
-        pipe_output = pipe(*args, **kwargs, **self.PIPELINE_KWARGS)
+        # FIXME: Hopefully we can get the progress bar to work with more tasks
+        if self.task in ("text-classification", "token-classification", "text2text-generation"):
+            num_rows = len(args[0])
+            pipe_output = [_ for _ in tqdm(pipe(iter(args[0]), *args[1:], **kwargs, **self.PIPELINE_KWARGS), total=num_rows)]
+
+            # I think text2text-generation outputs a list-of-lists when the
+            # input is an iterator, but the evaluation metrics are expecting a
+            # flat list.
+            if self.task == "text2text-generation":
+                pipe_output = [sub_list[0] for sub_list in pipe_output]
+        else:
+            pipe_output = pipe(*args, **kwargs, **self.PIPELINE_KWARGS)
+            num_rows = len(pipe_output)
         end_time = perf_counter()
-        return pipe_output, self._compute_time_perf(start_time, end_time, len(pipe_output))
+        return pipe_output, self._compute_time_perf(start_time, end_time, num_rows)
 
     def compute_metric(
         self,
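For context, a minimal standalone sketch of the pattern this change relies on, assuming only a standard transformers pipeline and tqdm; the pipe and inputs names below are illustrative, not the evaluator's own code. When a pipeline receives an iterator instead of a list, it yields predictions one at a time, so tqdm can report per-example progress while the outputs are collected.

from tqdm import tqdm
from transformers import pipeline

pipe = pipeline("text-classification")          # hypothetical pipeline
inputs = ["I love this.", "This is terrible."]  # hypothetical inputs

# Passing an iterator lets the pipeline stream results, so tqdm can track them.
outputs = [out for out in tqdm(pipe(iter(inputs)), total=len(inputs))]

# For text2text-generation the streamed output may be one list per input,
# which would need flattening before computing metrics, e.g.:
# outputs = [per_input[0] for per_input in outputs]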
