diff --git a/README.md b/README.md
index 89a96827c..8db5c4d42 100644
--- a/README.md
+++ b/README.md
@@ -16,9 +16,11 @@ The command to install PyTorch is as follows:
 
 1 Dec, 2024:
 
-- Pseudo Huber loss is now available for FLUX.1 and SD3.5 training. See [#1808](https://github.com/kohya-ss/sd-scripts/pull/1808) for details. Thanks to recris!
+- Pseudo Huber loss is now available for FLUX.1 and SD3.5 training. See PR [#1808](https://github.com/kohya-ss/sd-scripts/pull/1808) for details. Thanks to recris!
 - Specify `--loss_type huber` or `--loss_type smooth_l1` to use it. `--huber_c` and `--huber_scale` are also available.
 
+- [Prodigy + ScheduleFree](https://github.com/LoganBooker/prodigy-plus-schedule-free) is supported. See PR [#1811](https://github.com/kohya-ss/sd-scripts/pull/1811) for details. Thanks to rockerBOO!
+
 Nov 14, 2024:
 
 - Improved the implementation of block swap and made it available for both FLUX.1 and SD3 LoRA training. See [FLUX.1 LoRA training](#flux1-lora-training) etc. for how to use the new options. Training is possible with about 8-10GB of VRAM.
diff --git a/library/train_util.py b/library/train_util.py
index 289ab8235..6cfd14d5e 100644
--- a/library/train_util.py
+++ b/library/train_util.py
@@ -4609,7 +4609,7 @@ def task():
 
 def get_optimizer(args, trainable_params):
     # "Optimizer to use: AdamW, AdamW8bit, Lion, SGDNesterov, SGDNesterov8bit, PagedAdamW, PagedAdamW8bit, PagedAdamW32bit, Lion8bit, PagedLion8bit, AdEMAMix8bit, PagedAdEMAMix8bit, DAdaptation(DAdaptAdamPreprint), DAdaptAdaGrad, DAdaptAdam, DAdaptAdan, DAdaptAdanIP, DAdaptLion, DAdaptSGD, Adafactor"
-
+
     optimizer_type = args.optimizer_type
     if args.use_8bit_adam:
         assert (
@@ -4883,7 +4883,6 @@ def get_optimizer(args, trainable_params):
         optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs)
 
     elif optimizer_type.endswith("schedulefree".lower()):
-        should_train_optimizer = True
         try:
             import schedulefree as sf
         except ImportError:
@@ -5000,8 +4999,8 @@ def __instancecheck__(self, instance):
     optimizer_name = optimizer_class.__module__ + "." + optimizer_class.__name__
     optimizer_args = ",".join([f"{k}={v}" for k, v in optimizer_kwargs.items()])
 
-    if hasattr(optimizer, 'train') and callable(optimizer.train):
-        # make optimizer as train mode: we don't need to call train again, because eval will not be called in training loop
+    if hasattr(optimizer, "train") and callable(optimizer.train):
+        # make optimizer as train mode before training for schedulefree optimizer. the optimizer will be in eval mode in sampling and saving.
         optimizer.train()
 
     return optimizer_name, optimizer_args, optimizer
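
Note on the `optimizer.train()` call in the last hunk: schedule-free optimizers (including the Prodigy + ScheduleFree variant referenced in the README hunk) keep averaged weights and have to be toggled between train and eval mode around training, sampling, and checkpoint saving. The sketch below is only an illustration of that pattern, with `schedulefree.AdamWScheduleFree` standing in for whatever optimizer `get_optimizer` returns; the model and training loop are placeholders, not sd-scripts code.

```python
# Minimal sketch of the train/eval pattern for schedule-free optimizers.
# AdamWScheduleFree comes from the `schedulefree` package; the model and
# loop here are placeholders, not sd-scripts code.
import torch
import schedulefree

model = torch.nn.Linear(16, 16)
optimizer = schedulefree.AdamWScheduleFree(model.parameters(), lr=1e-3)

# As in get_optimizer above: put the optimizer in train mode once before training.
if hasattr(optimizer, "train") and callable(optimizer.train):
    optimizer.train()

for step in range(100):
    x = torch.randn(8, 16)
    loss = (model(x) - x).pow(2).mean()
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()

    if step % 50 == 0:
        # Sampling / saving should see the averaged (eval) weights,
        # then training resumes in train mode.
        optimizer.eval()
        # ... run sample generation or save a checkpoint here ...
        optimizer.train()
```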
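
For the `--loss_type huber` entry in the README hunk: pseudo Huber loss behaves like L2 for small residuals and roughly like L1 for large ones. The snippet below shows only the textbook form; how `--huber_c` and `--huber_scale` are actually mapped onto the loss (and scheduled across timesteps) is defined in PR #1808, so treat `delta` here as an illustrative stand-in rather than the exact sd-scripts parameter.

```python
# Textbook pseudo-Huber loss, for illustration only. The exact
# parameterization used by sd-scripts (--huber_c, --huber_scale) lives in
# PR #1808 and may differ from this sketch.
import torch


def pseudo_huber_loss(pred: torch.Tensor, target: torch.Tensor, delta: float = 0.1) -> torch.Tensor:
    # ~0.5 * diff**2 for |diff| << delta (L2-like), ~delta * |diff| for
    # |diff| >> delta (L1-like); delta sets the transition point.
    diff = pred - target
    return delta**2 * (torch.sqrt(1.0 + (diff / delta) ** 2) - 1.0)


# Small residuals are penalized quadratically, large ones only linearly,
# which damps the influence of outlier samples on the gradient.
pred = torch.tensor([0.01, 0.1, 1.0, 10.0])
target = torch.zeros_like(pred)
print(pseudo_huber_loss(pred, target))
```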