Allow custom loggers without an experiment property (#18093)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Carlos Mocholí <[email protected]>

(cherry picked from commit 281d6a2)
awaelchli authored and Borda committed Aug 28, 2023
1 parent f97058d commit fdb7f43
Showing 3 changed files with 28 additions and 5 deletions.
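For context, this change lets a bare `Logger` subclass that never defines an `experiment` property run through `trainer.fit` without raising `AttributeError` during setup. A minimal sketch, modeled on the `CustomLoggerWithoutExperiment` helper added in the tests below; the class name, return values, and the `BoringModel` import path are illustrative and not part of this commit:

```python
from lightning.pytorch import Trainer
from lightning.pytorch.demos.boring_classes import BoringModel
from lightning.pytorch.loggers.logger import Logger


class MetricsOnlyLogger(Logger):
    """A custom logger that defines no `experiment` property."""

    @property
    def name(self):
        return "metrics-only"

    @property
    def version(self):
        return None

    def log_metrics(self, metrics, step=None) -> None:
        print(f"step={step}: {metrics}")

    def log_hyperparams(self, params, *args, **kwargs) -> None:
        pass


# Previously, the Trainer's setup hook accessed `logger.experiment` unconditionally,
# which failed for a logger like this one; with the guard in this commit it is skipped.
trainer = Trainer(logger=MetricsOnlyLogger(), accelerator="cpu", max_steps=1)
trainer.fit(BoringModel())
```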
3 changes: 3 additions & 0 deletions src/lightning/pytorch/CHANGELOG.md
@@ -15,6 +15,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
- Fixed FSDP full-precision `param_dtype` training (`16-mixed`, `bf16-mixed` and `32-true` configurations) to avoid FSDP assertion errors with PyTorch < 2.0 ([#18278](https://github.com/Lightning-AI/lightning/pull/18278))


- Fixed an issue that prevented the use of custom logger classes without an `experiment` property defined ([#18093](https://github.com/Lightning-AI/lightning/pull/18093))


## [2.0.7] - 2023-08-14

### Added
3 changes: 2 additions & 1 deletion src/lightning/pytorch/trainer/call.py
@@ -76,7 +76,8 @@ def _call_setup_hook(trainer: "pl.Trainer") -> None:

# Trigger lazy creation of experiment in loggers so loggers have their metadata available
for logger in trainer.loggers:
_ = logger.experiment
if hasattr(logger, "experiment"):
_ = logger.experiment

trainer.strategy.barrier("pre_setup")

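The two-line guard above is the core of the fix: `_call_setup_hook` used to force lazy `experiment` creation on every logger, so a logger without that property raised `AttributeError` before any setup hook ran. A standalone sketch of the same pattern; the logger classes here are hypothetical stand-ins, not Lightning code:

```python
class EagerLogger:
    """Stand-in for a logger that creates its backend handle lazily via `experiment`."""

    @property
    def experiment(self):
        return object()  # pretend this opens a run or connects to a backend


class MinimalLogger:
    """Stand-in for a custom logger that never defines `experiment`."""


for logger in (EagerLogger(), MinimalLogger()):
    if hasattr(logger, "experiment"):
        _ = logger.experiment  # trigger lazy creation so metadata exists before setup hooks
    # MinimalLogger is simply skipped instead of raising AttributeError
```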
27 changes: 23 additions & 4 deletions tests/tests_pytorch/loggers/test_all.py
@@ -30,7 +30,7 @@
TensorBoardLogger,
WandbLogger,
)
from lightning.pytorch.loggers.logger import DummyExperiment
from lightning.pytorch.loggers.logger import DummyExperiment, Logger
from lightning.pytorch.loggers.tensorboard import _TENSORBOARD_AVAILABLE
from lightning.pytorch.tuner.tuning import Tuner
from tests_pytorch.helpers.runif import RunIf
@@ -239,7 +239,7 @@ def setup(self, trainer, pl_module, stage=None):
assert trainer.logger._mlflow_client
elif isinstance(trainer.logger, NeptuneLogger):
assert trainer.logger._run_instance
else:
elif hasattr(trainer.logger, "_experiment"):
assert trainer.logger._experiment


@@ -253,7 +253,23 @@ def on_train_batch_start(self, trainer, pl_module, batch, batch_idx):
assert pl_module.logger.experiment.something(foo="bar") is None


@pytest.mark.parametrize("logger_class", ALL_LOGGER_CLASSES_WO_NEPTUNE)
class CustomLoggerWithoutExperiment(Logger):
@property
def name(self):
return ""

@property
def version(self):
return None

def log_metrics(self, metrics, step=None) -> None:
pass

def log_hyperparams(self, params, *args, **kwargs) -> None:
pass


@pytest.mark.parametrize("logger_class", [*ALL_LOGGER_CLASSES_WO_NEPTUNE, CustomLoggerWithoutExperiment])
@RunIf(skip_windows=True)
def test_logger_initialization(tmpdir, monkeypatch, logger_class):
"""Test that loggers get replaced by dummy loggers on global rank > 0 and that the experiment object is available
@@ -268,6 +284,9 @@ def test_logger_initialization(tmpdir, monkeypatch, logger_class):
def _test_logger_initialization(tmpdir, logger_class):
logger_args = _get_logger_args(logger_class, tmpdir)
logger = logger_class(**logger_args)
callbacks = [LazyInitExperimentCheck()]
if not isinstance(logger, CustomLoggerWithoutExperiment):
callbacks.append(RankZeroLoggerCheck())
model = BoringModel()
trainer = Trainer(
logger=logger,
@@ -276,7 +295,7 @@ def _test_logger_initialization(tmpdir, logger_class):
accelerator="cpu",
devices=2,
max_steps=1,
callbacks=[RankZeroLoggerCheck(), LazyInitExperimentCheck()],
callbacks=callbacks,
)
trainer.fit(model)

