diff --git a/llmfoundry/command_utils/data_prep/convert_finetuning_dataset.py b/llmfoundry/command_utils/data_prep/convert_finetuning_dataset.py
index 1cbd47fb45..bb1197de57 100644
--- a/llmfoundry/command_utils/data_prep/convert_finetuning_dataset.py
+++ b/llmfoundry/command_utils/data_prep/convert_finetuning_dataset.py
@@ -165,7 +165,6 @@ def convert_finetuning_dataset(
         decoder_only_format=not encoder_decoder,
     )
 
-    tokenizer = None
     tokenizer_kwargs = tokenizer_kwargs
     tokenizer_kwargs.update({'model_max_length': max_seq_len})
     if tokenizer:
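
For context, a minimal sketch of why the deleted line matters, assuming `tokenizer` is a parameter of `convert_finetuning_dataset` (its signature is not shown in this hunk): unconditionally reassigning it to `None` makes the following `if tokenizer:` branch unreachable, so any caller-supplied tokenizer would be silently discarded. The reduced function below is hypothetical and only illustrates the pattern; names other than `tokenizer` and `tokenizer_kwargs` are not from the hunk.

```python
def build_kwargs(tokenizer=None, max_seq_len=2048):
    tokenizer = None  # the removed line: clobbers the caller-supplied argument
    tokenizer_kwargs = {'model_max_length': max_seq_len}
    if tokenizer:  # always falsy after the reassignment above
        tokenizer_kwargs['tokenizer'] = tokenizer
    return tokenizer_kwargs

# With the reassignment in place, passing a tokenizer has no effect:
assert build_kwargs(tokenizer='gpt2') == build_kwargs()
```

Dropping the `tokenizer = None` line restores the intended behavior: the `if tokenizer:` guard actually distinguishes callers that supplied a tokenizer from those that did not.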