
Commit

reset print memory
yingtongxiong committed Oct 25, 2023
1 parent 985465c commit cc20fa2
Showing 2 changed files with 1 addition and 4 deletions.
3 changes: 1 addition & 2 deletions internlm/model/overlap_handler.py
@@ -316,8 +316,7 @@ def before_forward(self, scheduler, inputs) -> None:
         self._overlap_handler.set_forward_mode(True)
 
     def after_forward(self, scheduler, outputs) -> None:
-        print("after forward allocated memory: ", torch.cuda.memory_allocated() / 1024 / 1024 /1024, flush=True)
-        print("after forward max memory: ", torch.cuda.max_memory_allocated() / 1024 / 1024 / 1024, flush=True)
+        pass
 
     def before_criterion(self, scheduler, outputs, label) -> None:
         pass
2 changes: 0 additions & 2 deletions train.py
@@ -255,8 +255,6 @@ def main(args):
         # update parameters, and returns (success_update, grad_norm)
         trainer_result = trainer.step()
         assert trainer_result is not None
-        print("after step: ", torch.cuda.memory_allocated() / 1024 / 1024 /1024, flush=True)
-        print("after step: ", torch.cuda.max_memory_allocated() / 1024 / 1024 / 1024, flush=True)
 
         success_update, grad_norm_groups = trainer_result
         if success_update:  # update parameters successfully
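For context, the prints removed by this commit reported current and peak CUDA memory in GiB via torch.cuda.memory_allocated() and torch.cuda.max_memory_allocated(). Below is a minimal sketch of how such diagnostics could be kept behind a toggle rather than hard-coded prints; the helper name log_cuda_memory and its enabled flag are hypothetical and not part of this repository.

import torch

_GIB = 1024 ** 3  # bytes per GiB

def log_cuda_memory(tag: str, enabled: bool = False) -> None:
    # Print current and peak CUDA memory usage in GiB when enabled.
    if not enabled or not torch.cuda.is_available():
        return
    allocated = torch.cuda.memory_allocated() / _GIB
    peak = torch.cuda.max_memory_allocated() / _GIB
    print(f"{tag} allocated memory: {allocated:.3f} GiB", flush=True)
    print(f"{tag} max memory: {peak:.3f} GiB", flush=True)

# Hypothetical usage at the call sites touched by this commit:
# log_cuda_memory("after forward", enabled=True)
# log_cuda_memory("after step", enabled=True)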
