From 779f4908d8e659123098707af1bbd7ab9856cf62 Mon Sep 17 00:00:00 2001
From: Jeremy Dohmann
Date: Thu, 11 Apr 2024 19:39:17 -0400
Subject: [PATCH] fix pyright

---
 llmfoundry/eval/datasets/in_context_learning_evaluation.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/llmfoundry/eval/datasets/in_context_learning_evaluation.py b/llmfoundry/eval/datasets/in_context_learning_evaluation.py
index 30502d2d92..df5799df2b 100644
--- a/llmfoundry/eval/datasets/in_context_learning_evaluation.py
+++ b/llmfoundry/eval/datasets/in_context_learning_evaluation.py
@@ -493,6 +493,8 @@ def split_batch(self, batch: Any, microbatch_size: Union[int , float]) -> Sequen
         # Don't split kwargs that don't change
         # Normally split torch tensors
         # List split lists of strings
+        if isinstance(microbatch_size, float):
+            raise ValueError('split_batch does not support floating point microbatch_size.')
         chunked = {}
         for k, v in batch.items():
             if k in self.static_keys:
@@ -922,6 +924,8 @@ def split_batch(self, batch: Any, microbatch_size: Union[int , float]) -> Sequen
         Returns:
             list: List of chunked batches
         """
+        if isinstance(microbatch_size, float):
+            raise ValueError('split_batch does not support floating point microbatch_size.')
         chunked = {}
         for k, v in batch.items():
             if k in self.static_keys:
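
The guard satisfies pyright because raising on the float branch narrows microbatch_size from Union[int, float] to int for the remainder of each method, so integer-only operations downstream type-check cleanly. The following is a minimal, self-contained sketch of that narrowing pattern under stated assumptions; the name split_batch_sketch, the batch layout, and the chunking logic are illustrative and are not the llm-foundry implementation.

from typing import Any, Dict, List, Sequence, Union


def split_batch_sketch(
    batch: Dict[str, Any],
    microbatch_size: Union[int, float],
) -> Sequence[Dict[str, Any]]:
    """Illustrative only: shows why the isinstance guard fixes pyright."""
    # Rejecting float here lets a static checker narrow microbatch_size to
    # int below, so integer-only APIs such as range() type-check.
    if isinstance(microbatch_size, float):
        raise ValueError(
            'split_batch does not support floating point microbatch_size.')
    chunked: Dict[str, List[Any]] = {}
    for k, v in batch.items():
        if isinstance(v, list):
            # range() requires an int step, which is why floats are ruled out.
            chunked[k] = [
                v[i:i + microbatch_size]
                for i in range(0, len(v), microbatch_size)
            ]
        else:
            # Treat non-list values as static and repeat them per microbatch.
            chunked[k] = [v]
    num_chunks = max(len(c) for c in chunked.values())
    return [{
        k: c[min(i, len(c) - 1)] for k, c in chunked.items()
    } for i in range(num_chunks)]


if __name__ == '__main__':
    # Two microbatches of size 2; passing 2.0 instead would raise ValueError.
    print(split_batch_sketch({'input_ids': [1, 2, 3, 4], 'mode': 'icl'}, 2))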