Add eval_drop_last flag to fix TE eval bug #1247

Closed · wants to merge 13 commits
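For context: "TE" in the title presumably refers to NVIDIA TransformerEngine, whose kernels can be sensitive to batch shape, so an undersized final eval batch can misbehave. drop_last is the standard torch.utils.data.DataLoader argument that discards a trailing batch smaller than batch_size; this PR exposes it for eval. A minimal sketch of the behavior the new flag toggles (plain PyTorch; the names below are illustrative, not taken from the PR):

# drop_last=True discards the final partial batch instead of yielding it.
import torch
from torch.utils.data import DataLoader, TensorDataset

ds = TensorDataset(torch.arange(10))                  # 10 examples
keep = DataLoader(ds, batch_size=4)                   # batches of 4, 4, 2
drop = DataLoader(ds, batch_size=4, drop_last=True)   # batches of 4, 4

print(len(keep), len(drop))                           # 3 2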
5 changes: 5 additions & 0 deletions llmfoundry/eval/datasets/in_context_learning_evaluation.py
@@ -1311,6 +1311,7 @@ def build_icl_dataloader(
     generation_kwargs: Dict,
     early_stopping_criteria: Optional[List[str]] = None,
     do_normalization: bool = True,
+    eval_drop_last: bool = False,
 ) -> DataSpec:
     """Factory method that builds the specific dataset for the specified.

@@ -1421,6 +1422,7 @@ def build_icl_dataloader(
             batch_size=effective_batchsize,
             sampler=sampler,
             collate_fn=dataset.collate_fn,
+            drop_last=eval_drop_last,
         ),
         device_transforms=None,
         get_num_samples_in_batch=dataset.get_num_samples_in_batch,
@@ -1532,6 +1534,7 @@ def get_icl_task_dataloader(
     generation_kwargs: Optional[Dict] = None,
     early_stopping_criteria: Optional[List[str]] = None,
     do_normalization: bool = True,
+    eval_drop_last: bool = False,
 ) -> Union[DataSpec, Dict[str, DataSpec]]:
     r"""Constructs a dataloader (or dataloaders if has_categories is True)

@@ -1656,6 +1659,7 @@ def get_icl_task_dataloader(
                 generation_kwargs=generation_kwargs,
                 early_stopping_criteria=early_stopping_criteria,
                 do_normalization=do_normalization,
+                eval_drop_last=eval_drop_last,
             )
         return result_dls
     else:
@@ -1681,4 +1685,5 @@ def get_icl_task_dataloader(
             generation_kwargs=generation_kwargs,
             early_stopping_criteria=early_stopping_criteria,
             do_normalization=do_normalization,
+            eval_drop_last=eval_drop_last,
         )
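Taken together, these hunks thread a single boolean from the public factory down to the DataLoader that gets wrapped in a Composer DataSpec. A condensed sketch of the resulting plumbing (most parameters are elided; the DataSpec import path is an assumption, and dataset is assumed to expose collate_fn and get_num_samples_in_batch as the real ICL datasets do):

# Condensed, illustrative sketch of build_icl_dataloader after this change.
from composer.core import DataSpec
from torch.utils.data import DataLoader

def build_icl_dataloader_sketch(dataset, sampler, effective_batchsize,
                                eval_drop_last=False):
    return DataSpec(
        DataLoader(
            dataset,
            batch_size=effective_batchsize,
            sampler=sampler,
            collate_fn=dataset.collate_fn,
            drop_last=eval_drop_last,  # the new pass-through
        ),
        device_transforms=None,
        get_num_samples_in_batch=dataset.get_num_samples_in_batch,
    )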
16 changes: 16 additions & 0 deletions llmfoundry/utils/builders.py
@@ -65,6 +65,7 @@ def build_evaluators(
     device_eval_batch_size: int,
     icl_seq_len: int,
     icl_subset_num_batches: Optional[int],
+    eval_drop_last: bool = False,
 ) -> Tuple[List[Evaluator], List[str], Optional[EvalGauntlet]]:
 
     evaluators = []
@@ -85,6 +86,7 @@ def build_evaluators(
             device_eval_batch_size,
             icl_seq_len,
             icl_subset_num_batches,
+            eval_drop_last=eval_drop_last,
         )
         evaluators.extend(icl_evaluators)

@@ -150,13 +152,15 @@ def build_icl_data_and_gauntlet(
     device_eval_batch_size: int,
     icl_seq_len: int,
     icl_subset_num_batches: Optional[int] = None,
+    eval_drop_last: bool = False,
 ) -> Tuple[List[Evaluator], List[str], Optional[EvalGauntlet]]:
     icl_evaluators, logger_keys = build_icl_evaluators(
         icl_tasks_config,
         tokenizer,
         icl_seq_len,
         device_eval_batch_size,
         icl_subset_num_batches=icl_subset_num_batches,
+        eval_drop_last=eval_drop_last,
     )
     eval_gauntlet_cb = None
     if eval_gauntlet_config is not None:
@@ -503,6 +507,7 @@ def build_icl_evaluators(
     default_batch_size: int,
     destination_dir: Optional[str] = None,
     icl_subset_num_batches: Optional[int] = None,
+    eval_drop_last: bool = False,
 ) -> Tuple[List[Evaluator], List[str]]:
     if destination_dir is None:
         destination_dir = os.getcwd()
@@ -621,10 +626,16 @@ def _validate_cfg(icl_cfg: Dict[str, Any]):
                 generation_kwargs=icl_cfg.get('generation_kwargs', {}),
                 early_stopping_criteria=early_stopping_criteria,
                 do_normalization=icl_cfg.get('do_normalization', True),
+                eval_drop_last=eval_drop_last,
             )
             if 'has_categories' in icl_cfg and icl_cfg[
                     'has_categories'] and isinstance(dataloaders, dict):
                 for category in dataloaders.keys():
+                    if len(dataloaders[category].dataloader) == 0:
+                        log.warning(
+                            f'No data for {label}/{category}, skipping. May have been filtered out by eval_drop_last={eval_drop_last} and batch size={default_batch_size}.',
+                        )
+                        continue
                     logger_keys.extend([
                         f'metrics/{label}/{category}/{m}' for m in metric_names
                     ])
@@ -636,6 +647,11 @@ def _validate_cfg(icl_cfg: Dict[str, Any]):
                     ),
                 )
             else:
+                if len(dataloaders.dataloader) == 0:  # type: ignore
+                    log.warning(
+                        f'No data for {label}, skipping. May have been filtered out by eval_drop_last={eval_drop_last} and batch size={default_batch_size}.',
+                    )
+                    continue
                 logger_keys.extend([
                     f'metrics/{label}/{m}' for m in metric_names
                 ])
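These guards exist because drop_last=True can leave a loader with zero batches: for a map-style dataset, len(loader) is floor(n / batch_size), which is 0 whenever a task (or category) has fewer examples than the eval batch size. A quick, illustrative check of that arithmetic:

# A task smaller than the batch size yields no batches under drop_last=True,
# so build_icl_evaluators now warns and skips it instead of evaluating nothing.
import torch
from torch.utils.data import DataLoader, TensorDataset

small = TensorDataset(torch.arange(3))                    # 3 examples
loader = DataLoader(small, batch_size=8, drop_last=True)  # floor(3 / 8) batches
print(len(loader))                                        # 0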
1 change: 1 addition & 0 deletions llmfoundry/utils/config_utils.py
@@ -131,6 +131,7 @@ class TrainConfig:
     eval_gauntlet_str: Optional[str] = None  # should not be set by the user
     icl_subset_num_batches: Optional[int] = None
     icl_seq_len: Optional[int] = None
+    eval_drop_last: Optional[bool] = False
 
     # Logging
     loggers: Optional[Dict[str, Any]] = None
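With this field in place, the flag should be settable from a train config alongside the neighboring ICL options (presumably eval_drop_last: true in the YAML, though the PR does not include an example). A hypothetical, trimmed-down sketch of the dataclass wiring:

# Hypothetical excerpt of TrainConfig; only the fields from the hunk above.
from dataclasses import dataclass
from typing import Optional

@dataclass
class TrainConfigSketch:
    icl_subset_num_batches: Optional[int] = None
    icl_seq_len: Optional[int] = None
    eval_drop_last: Optional[bool] = False  # new flag; off by default

cfg = TrainConfigSketch(eval_drop_last=True)
print(cfg.eval_drop_last)  # True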