From 46b7d72d54838a0ccee4264436090f90124bc2e3 Mon Sep 17 00:00:00 2001
From: Jeremy Dohmann
Date: Thu, 9 Nov 2023 11:49:17 -0500
Subject: [PATCH] remove import

---
 llmfoundry/models/hf/hf_causal_lm.py  | 1 -
 llmfoundry/models/mpt/modeling_mpt.py | 2 --
 llmfoundry/utils/builders.py          | 4 ----
 3 files changed, 7 deletions(-)

diff --git a/llmfoundry/models/hf/hf_causal_lm.py b/llmfoundry/models/hf/hf_causal_lm.py
index 872cd3115c..81a27321f3 100644
--- a/llmfoundry/models/hf/hf_causal_lm.py
+++ b/llmfoundry/models/hf/hf_causal_lm.py
@@ -71,7 +71,6 @@ def __init__(self, om_model_config: Union[DictConfig,
             InContextLearningMultipleChoiceAccuracy(),
             InContextLearningQAAccuracy(),
             InContextLearningCodeEvalAccuracy(),
-            InContextLearningCodeExecutionPredictionAccuracy(),
             InContextLearningLMExpectedCalibrationError(),
             InContextLearningMCExpectedCalibrationError()
         ]
diff --git a/llmfoundry/models/mpt/modeling_mpt.py b/llmfoundry/models/mpt/modeling_mpt.py
index 9b91e70b2f..4f4581b177 100644
--- a/llmfoundry/models/mpt/modeling_mpt.py
+++ b/llmfoundry/models/mpt/modeling_mpt.py
@@ -15,7 +15,6 @@
 import torch.nn as nn
 import torch.nn.functional as F
 from composer.metrics import (InContextLearningCodeEvalAccuracy,
-                              InContextLearningCodeExecutionPredictionAccuracy,
                               InContextLearningLMAccuracy,
                               InContextLearningLMExpectedCalibrationError,
                               InContextLearningMCExpectedCalibrationError,
@@ -708,7 +707,6 @@ def __init__(
             InContextLearningCodeEvalAccuracy(),
             InContextLearningLMExpectedCalibrationError(),
             InContextLearningMCExpectedCalibrationError(),
-            InContextLearningCodeExecutionPredictionAccuracy()
         ]
 
         super().__init__(
diff --git a/llmfoundry/utils/builders.py b/llmfoundry/utils/builders.py
index f45482a376..09b90fe18f 100644
--- a/llmfoundry/utils/builders.py
+++ b/llmfoundry/utils/builders.py
@@ -249,10 +249,6 @@ def _validate_cfg(icl_cfg: DictConfig):
                 icl_cfg.metric_names = ['InContextLearningQAAccuracy']
             elif icl_cfg.icl_task_type == 'code_evaluation':
                 icl_cfg.metric_names = ['InContextLearningCodeEvalAccuracy']
-            elif icl_cfg.icl_task_type == 'code_execution_prediction':
-                icl_cfg.metric_names = [
-                    'InContextLearningCodeExecutionPredictionAccuracy'
-                ]
             else:
                 raise ValueError(
                     f'No metric_names defined, unable to build default metrics for icl_task_type={icl_cfg.icl_task_type}.'