Skip to content

Commit

Permalink
Remove extra gc_cuda calls
Browse files Browse the repository at this point in the history
  • Loading branch information
irenedea committed Jul 21, 2024
1 parent 6c071ee commit bc975cf
Showing 1 changed file with 0 additions and 5 deletions.
5 changes: 0 additions & 5 deletions llmfoundry/callbacks/hf_checkpointer.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,6 @@
from llmfoundry.utils.huggingface_hub_utils import \
edit_files_for_hf_compatibility

from llmfoundry.callbacks.scheduled_gc_callback import gc_cuda

try:
import transformer_engine.pytorch as te
Expand Down Expand Up @@ -395,8 +394,6 @@ def _save_checkpoint(self, state: State, logger: Logger):

log.debug(f'memory before first gc {psutil.virtual_memory()}')

gc_cuda()

log.debug(f'memory {psutil.virtual_memory()}')

if state.is_model_ddp:
Expand Down Expand Up @@ -492,8 +489,6 @@ def dtensor_to_tensor_hook(
log.debug(f'memory after gather {psutil.virtual_memory()}')


gc_cuda()


# you can have the percentage of used RAM
log.debug(f'memory after gather after gc {psutil.virtual_memory()}')
Expand Down

0 comments on commit bc975cf

Please sign in to comment.