diff --git a/llmfoundry/command_utils/train.py b/llmfoundry/command_utils/train.py
index fe128158c3..3b94e57a6c 100644
--- a/llmfoundry/command_utils/train.py
+++ b/llmfoundry/command_utils/train.py
@@ -20,7 +20,6 @@
     cyclic_schedule,
 )
 from composer.utils import dist, get_device, reproducibility, ParallelismConfig, TPConfig, FSDPConfig
-from icecream import install, ic
 from omegaconf import DictConfig
 from omegaconf import OmegaConf as om
 
diff --git a/tests/models/utils/test_tp_strategy.py b/tests/models/utils/test_tp_strategy.py
index 0e0079ad7e..0166c9ea64 100644
--- a/tests/models/utils/test_tp_strategy.py
+++ b/tests/models/utils/test_tp_strategy.py
@@ -1,18 +1,10 @@
-import pytest
-
-from composer.trainer.trainer import Trainer
-from composer.utils import dist
 from torch.distributed.tensor.parallel import ColwiseParallel, RowwiseParallel, PrepareModuleInput
 from torch.distributed._tensor import Replicate, Shard
-from torch.utils.data import DataLoader
 
 from llmfoundry.models.mpt.modeling_mpt import ComposerMPTCausalLM
 from llmfoundry.utils.builders import build_tp_strategy
-from icecream import install
-install()
-
 
 def test_ffn_tp_strategy_layer_plan():
     # Actual layer plan