diff --git a/mcli/mcli-openai-eval.yaml b/mcli/mcli-openai-eval.yaml
index 179b078fb6..dbccee83ba 100644
--- a/mcli/mcli-openai-eval.yaml
+++ b/mcli/mcli-openai-eval.yaml
@@ -12,8 +12,8 @@ command: |
 
 # Mosaic Cloud will use run_name (with a unique suffix) to populate the env var $RUN_NAME
 run_name: openai-eval
-# gpu_num: #
-# gpu_type: #
+gpu_num: #
+gpu_type: #
 cluster: # replace with your cluster here!
 
 image: mosaicml/llm-foundry:2.1.0_cu121_flash2-latest
@@ -25,41 +25,22 @@ parameters:
   device_eval_batch_size: 4
   models:
   -
-    model_name: openai/davinci
-    model:
-      name: openai_causal_lm
-      version: davinci
-    tokenizer:
-      name: openai
-      kwargs:
-        name: davinci
-  -
-    model_name: openai/ada
-    model:
-      name: openai_causal_lm
-      version: ada
-    tokenizer:
-      name: openai
-      kwargs:
-        name: ada
-  -
-    model_name: openai/gpt-4
+    model_name: openai/gpt-3.5-turbo
     model:
       name: openai_chat
-      version: gpt-4
+      version: gpt-3.5-turbo
     tokenizer:
-      name: openai
+      name: tiktoken
       kwargs:
-        name: gpt-4
+        model_name: gpt-3.5-turbo
   -
-    model_name: openai/gpt-3.5-turbo
+    model_name: openai/davinci
     model:
-      name: openai_chat
-      version: gpt-3.5-turbo
+      name: openai_causal_lm
+      version: davinci
     tokenizer:
-      name: openai
+      name: tiktoken
       kwargs:
-        name: gpt-3.5-turbo
+        model_name: davinci
 
-  icl_tasks: 'eval/yamls/lm_tasks.yaml'
-  eval_gauntlet: 'eval/yamls/eval_gauntlet.yaml'
+  icl_tasks: 'eval/yamls/lm_tasks_v0.2.yaml'
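
For reference, here is a sketch of what the `models:` portion of `mcli/mcli-openai-eval.yaml` looks like once this patch is applied, reassembled directly from the hunks above; other keys under `parameters:` are elided and unchanged:

```yaml
parameters:
  # (other parameters keys unchanged by this diff)
  models:
  -
    model_name: openai/gpt-3.5-turbo
    model:
      name: openai_chat          # chat-completions wrapper
      version: gpt-3.5-turbo
    tokenizer:
      name: tiktoken             # replaces the previous `openai` tokenizer
      kwargs:
        model_name: gpt-3.5-turbo
  -
    model_name: openai/davinci
    model:
      name: openai_causal_lm     # completions wrapper
      version: davinci
    tokenizer:
      name: tiktoken
      kwargs:
        model_name: davinci

  icl_tasks: 'eval/yamls/lm_tasks_v0.2.yaml'
```

Assuming the usual MosaicML CLI workflow, the config would be launched with `mcli run -f mcli/mcli-openai-eval.yaml` after filling in `cluster`, `gpu_num`/`gpu_type`, and an OpenAI API key in the `command:` block; those values are placeholders in the file and are not supplied by this diff.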