diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 000000000..64284b907
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,7 @@
+---
+version: 2
+updates:
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "monthly"
diff --git a/.github/workflows/typos.yml b/.github/workflows/typos.yml
index e37838390..90132c334 100644
--- a/.github/workflows/typos.yml
+++ b/.github/workflows/typos.yml
@@ -18,4 +18,4 @@ jobs:
       - uses: actions/checkout@v3
 
       - name: typos-action
-        uses: crate-ci/typos@v1.13.10
+        uses: crate-ci/typos@v1.16.15
diff --git a/_typos.toml b/_typos.toml
index 396ee5c57..ae9e06b18 100644
--- a/_typos.toml
+++ b/_typos.toml
@@ -9,7 +9,25 @@
 parms="parms"
 nin="nin"
 extention="extention" # Intentionally left
 nd="nd"
+shs="shs"
+sts="sts"
+scs="scs"
+cpc="cpc"
+coc="coc"
+cic="cic"
+msm="msm"
+usu="usu"
+ici="ici"
+lvl="lvl"
+dii="dii"
+muk="muk"
+ori="ori"
+hru="hru"
+rik="rik"
+koo="koo"
+yos="yos"
+wn="wn"
 
 [files]
-extend-exclude = ["_typos.toml"]
+extend-exclude = ["_typos.toml", "venv"]
diff --git a/fine_tune.py b/fine_tune.py
index f300d4688..2ecb4ff36 100644
--- a/fine_tune.py
+++ b/fine_tune.py
@@ -80,8 +80,8 @@ def train(args):
 
     current_epoch = Value("i", 0)
     current_step = Value("i", 0)
-    ds_for_collater = train_dataset_group if args.max_data_loader_n_workers == 0 else None
-    collater = train_util.collater_class(current_epoch, current_step, ds_for_collater)
+    ds_for_collator = train_dataset_group if args.max_data_loader_n_workers == 0 else None
+    collator = train_util.collator_class(current_epoch, current_step, ds_for_collator)
 
     if args.debug_dataset:
         train_util.debug_dataset(train_dataset_group)
@@ -208,7 +208,7 @@ def fn_recursive_set_mem_eff(module: torch.nn.Module):
         train_dataset_group,
         batch_size=1,
         shuffle=True,
-        collate_fn=collater,
+        collate_fn=collator,
         num_workers=n_workers,
         persistent_workers=args.persistent_data_loader_workers,
     )
diff --git a/gen_img_diffusers.py b/gen_img_diffusers.py
index 70ca67942..0ec683a23 100644
--- a/gen_img_diffusers.py
+++ b/gen_img_diffusers.py
@@ -3364,7 +3364,7 @@ def setup_parser() -> argparse.ArgumentParser:
     )
     parser.add_argument("--network_mul", type=float, default=None, nargs="*", help="additional network multiplier / 追加ネットワークの効果の倍率")
     parser.add_argument(
-        "--network_args", type=str, default=None, nargs="*", help="additional argmuments for network (key=value) / ネットワークへの追加の引数"
+        "--network_args", type=str, default=None, nargs="*", help="additional arguments for network (key=value) / ネットワークへの追加の引数"
     )
     parser.add_argument("--network_show_meta", action="store_true", help="show metadata of network model / ネットワークモデルのメタデータを表示する")
     parser.add_argument("--network_merge", action="store_true", help="merge network weights to original model / ネットワークの重みをマージする")
@@ -3390,7 +3390,7 @@
         "--max_embeddings_multiples",
         type=int,
         default=None,
-        help="max embeding multiples, max token length is 75 * multiples / トークン長をデフォルトの何倍とするか 75*この値 がトークン長となる",
+        help="max embedding multiples, max token length is 75 * multiples / トークン長をデフォルトの何倍とするか 75*この値 がトークン長となる",
     )
     parser.add_argument(
         "--clip_guidance_scale",
@@ -3449,7 +3449,7 @@
         "--highres_fix_upscaler_args",
         type=str,
         default=None,
-        help="additional argmuments for upscaler (key=value) / upscalerへの追加の引数",
+        help="additional arguments for upscaler (key=value) / upscalerへの追加の引数",
     )
     parser.add_argument(
         "--highres_fix_disable_control_net",
diff --git a/library/original_unet.py b/library/original_unet.py
index c0028ddc2..240b85951 100644
--- a/library/original_unet.py
+++ b/library/original_unet.py
@@ -131,7 +131,7 @@
 UP_BLOCK_TYPES = ["UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"]
 
 
-# region memory effcient attention
+# region memory efficient attention
 # FlashAttentionを使うCrossAttention
 # based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py
 
diff --git a/library/sdxl_original_unet.py b/library/sdxl_original_unet.py
index 586909bdb..26a0af319 100644
--- a/library/sdxl_original_unet.py
+++ b/library/sdxl_original_unet.py
@@ -41,7 +41,7 @@
 USE_REENTRANT = True
 
 
-# region memory effcient attention
+# region memory efficient attention
 # FlashAttentionを使うCrossAttention
 # based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py
 
diff --git a/library/train_util.py b/library/train_util.py
index 35bfb5f5b..5433357ac 100644
--- a/library/train_util.py
+++ b/library/train_util.py
@@ -4658,7 +4658,7 @@ def __getitem__(self, idx):
 
 
 # collate_fn用 epoch,stepはmultiprocessing.Value
-class collater_class:
+class collator_class:
     def __init__(self, epoch, step, dataset):
         self.current_epoch = epoch
         self.current_step = step
diff --git a/sdxl_gen_img.py b/sdxl_gen_img.py
index ac01b76e0..ab2b6b3d6 100755
--- a/sdxl_gen_img.py
+++ b/sdxl_gen_img.py
@@ -2612,7 +2612,7 @@ def setup_parser() -> argparse.ArgumentParser:
     )
     parser.add_argument("--network_mul", type=float, default=None, nargs="*", help="additional network multiplier / 追加ネットワークの効果の倍率")
     parser.add_argument(
-        "--network_args", type=str, default=None, nargs="*", help="additional argmuments for network (key=value) / ネットワークへの追加の引数"
+        "--network_args", type=str, default=None, nargs="*", help="additional arguments for network (key=value) / ネットワークへの追加の引数"
     )
     parser.add_argument("--network_show_meta", action="store_true", help="show metadata of network model / ネットワークモデルのメタデータを表示する")
     parser.add_argument("--network_merge", action="store_true", help="merge network weights to original model / ネットワークの重みをマージする")
@@ -2631,7 +2631,7 @@
         "--max_embeddings_multiples",
         type=int,
         default=None,
-        help="max embeding multiples, max token length is 75 * multiples / トークン長をデフォルトの何倍とするか 75*この値 がトークン長となる",
+        help="max embedding multiples, max token length is 75 * multiples / トークン長をデフォルトの何倍とするか 75*この値 がトークン長となる",
     )
     parser.add_argument(
         "--guide_image_path", type=str, default=None, nargs="*", help="image to CLIP guidance / CLIP guided SDでガイドに使う画像"
     )
@@ -2666,7 +2666,7 @@
         "--highres_fix_upscaler_args",
         type=str,
         default=None,
-        help="additional argmuments for upscaler (key=value) / upscalerへの追加の引数",
+        help="additional arguments for upscaler (key=value) / upscalerへの追加の引数",
     )
     parser.add_argument(
         "--highres_fix_disable_control_net",
diff --git a/sdxl_minimal_inference.py b/sdxl_minimal_inference.py
index ff865629e..45b9edd65 100644
--- a/sdxl_minimal_inference.py
+++ b/sdxl_minimal_inference.py
@@ -101,7 +101,7 @@ def get_timestep_embedding(x, outdim):
         type=str,
         nargs="*",
         default=[],
-        help="LoRA weights, only supports networks.lora, each arguement is a `path;multiplier` (semi-colon separated)",
+        help="LoRA weights, only supports networks.lora, each argument is a `path;multiplier` (semi-colon separated)",
     )
     parser.add_argument("--interactive", action="store_true")
     args = parser.parse_args()
diff --git a/sdxl_train.py b/sdxl_train.py
index 6b255d679..7bde3cab7 100644
--- a/sdxl_train.py
+++ b/sdxl_train.py
@@ -172,8 +172,8 @@ def train(args):
 
     current_epoch = Value("i", 0)
     current_step = Value("i", 0)
-    ds_for_collater = train_dataset_group if args.max_data_loader_n_workers == 0 else None
-    collater = train_util.collater_class(current_epoch, current_step, ds_for_collater)
+    ds_for_collator = train_dataset_group if args.max_data_loader_n_workers == 0 else None
+    collator = train_util.collator_class(current_epoch, current_step, ds_for_collator)
 
     train_dataset_group.verify_bucket_reso_steps(32)
 
@@ -348,7 +348,7 @@ def fn_recursive_set_mem_eff(module: torch.nn.Module):
         train_dataset_group,
         batch_size=1,
         shuffle=True,
-        collate_fn=collater,
+        collate_fn=collator,
         num_workers=n_workers,
         persistent_workers=args.persistent_data_loader_workers,
     )
diff --git a/sdxl_train_control_net_lllite.py b/sdxl_train_control_net_lllite.py
index 61ebfb581..0df61e848 100644
--- a/sdxl_train_control_net_lllite.py
+++ b/sdxl_train_control_net_lllite.py
@@ -106,8 +106,8 @@ def train(args):
 
     current_epoch = Value("i", 0)
     current_step = Value("i", 0)
-    ds_for_collater = train_dataset_group if args.max_data_loader_n_workers == 0 else None
-    collater = train_util.collater_class(current_epoch, current_step, ds_for_collater)
+    ds_for_collator = train_dataset_group if args.max_data_loader_n_workers == 0 else None
+    collator = train_util.collator_class(current_epoch, current_step, ds_for_collator)
 
     train_dataset_group.verify_bucket_reso_steps(32)
 
@@ -245,7 +245,7 @@ def train(args):
         train_dataset_group,
         batch_size=1,
         shuffle=True,
-        collate_fn=collater,
+        collate_fn=collator,
         num_workers=n_workers,
         persistent_workers=args.persistent_data_loader_workers,
     )
diff --git a/sdxl_train_control_net_lllite_old.py b/sdxl_train_control_net_lllite_old.py
index f8169bdbf..79920a972 100644
--- a/sdxl_train_control_net_lllite_old.py
+++ b/sdxl_train_control_net_lllite_old.py
@@ -102,8 +102,8 @@ def train(args):
 
     current_epoch = Value("i", 0)
     current_step = Value("i", 0)
-    ds_for_collater = train_dataset_group if args.max_data_loader_n_workers == 0 else None
-    collater = train_util.collater_class(current_epoch, current_step, ds_for_collater)
+    ds_for_collator = train_dataset_group if args.max_data_loader_n_workers == 0 else None
+    collator = train_util.collator_class(current_epoch, current_step, ds_for_collator)
 
     train_dataset_group.verify_bucket_reso_steps(32)
 
@@ -213,7 +213,7 @@ def train(args):
         train_dataset_group,
         batch_size=1,
         shuffle=True,
-        collate_fn=collater,
+        collate_fn=collator,
         num_workers=n_workers,
         persistent_workers=args.persistent_data_loader_workers,
     )
diff --git a/tools/cache_latents.py b/tools/cache_latents.py
index b6991ac19..17916ef70 100644
--- a/tools/cache_latents.py
+++ b/tools/cache_latents.py
@@ -86,8 +86,8 @@ def cache_to_disk(args: argparse.Namespace) -> None:
 
     current_epoch = Value("i", 0)
     current_step = Value("i", 0)
-    ds_for_collater = train_dataset_group if args.max_data_loader_n_workers == 0 else None
-    collater = train_util.collater_class(current_epoch, current_step, ds_for_collater)
+    ds_for_collator = train_dataset_group if args.max_data_loader_n_workers == 0 else None
+    collator = train_util.collator_class(current_epoch, current_step, ds_for_collator)
 
     # acceleratorを準備する
     print("prepare accelerator")
@@ -120,7 +120,7 @@ def cache_to_disk(args: argparse.Namespace) -> None:
         train_dataset_group,
         batch_size=1,
         shuffle=True,
-        collate_fn=collater,
+        collate_fn=collator,
         num_workers=n_workers,
         persistent_workers=args.persistent_data_loader_workers,
     )
diff --git a/tools/cache_text_encoder_outputs.py b/tools/cache_text_encoder_outputs.py
index 2110e7261..7d9b13d68 100644
--- a/tools/cache_text_encoder_outputs.py
+++ b/tools/cache_text_encoder_outputs.py
@@ -91,8 +91,8 @@ def cache_to_disk(args: argparse.Namespace) -> None:
 
     current_epoch = Value("i", 0)
     current_step = Value("i", 0)
-    ds_for_collater = train_dataset_group if args.max_data_loader_n_workers == 0 else None
-    collater = train_util.collater_class(current_epoch, current_step, ds_for_collater)
+    ds_for_collator = train_dataset_group if args.max_data_loader_n_workers == 0 else None
+    collator = train_util.collator_class(current_epoch, current_step, ds_for_collator)
 
     # acceleratorを準備する
     print("prepare accelerator")
@@ -125,7 +125,7 @@ def cache_to_disk(args: argparse.Namespace) -> None:
         train_dataset_group,
         batch_size=1,
         shuffle=True,
-        collate_fn=collater,
+        collate_fn=collator,
         num_workers=n_workers,
         persistent_workers=args.persistent_data_loader_workers,
     )
diff --git a/train_controlnet.py b/train_controlnet.py
index 42da44125..5bc8d399c 100644
--- a/train_controlnet.py
+++ b/train_controlnet.py
@@ -98,8 +98,8 @@ def train(args):
 
     current_epoch = Value("i", 0)
     current_step = Value("i", 0)
-    ds_for_collater = train_dataset_group if args.max_data_loader_n_workers == 0 else None
-    collater = train_util.collater_class(current_epoch, current_step, ds_for_collater)
+    ds_for_collator = train_dataset_group if args.max_data_loader_n_workers == 0 else None
+    collator = train_util.collator_class(current_epoch, current_step, ds_for_collator)
 
     if args.debug_dataset:
         train_util.debug_dataset(train_dataset_group)
@@ -245,7 +245,7 @@ def train(args):
         train_dataset_group,
         batch_size=1,
         shuffle=True,
-        collate_fn=collater,
+        collate_fn=collator,
         num_workers=n_workers,
         persistent_workers=args.persistent_data_loader_workers,
     )
diff --git a/train_db.py b/train_db.py
index feb147787..a1b9cac8b 100644
--- a/train_db.py
+++ b/train_db.py
@@ -78,8 +78,8 @@ def train(args):
 
     current_epoch = Value("i", 0)
     current_step = Value("i", 0)
-    ds_for_collater = train_dataset_group if args.max_data_loader_n_workers == 0 else None
-    collater = train_util.collater_class(current_epoch, current_step, ds_for_collater)
+    ds_for_collator = train_dataset_group if args.max_data_loader_n_workers == 0 else None
+    collator = train_util.collator_class(current_epoch, current_step, ds_for_collator)
 
     if args.no_token_padding:
         train_dataset_group.disable_token_padding()
@@ -177,7 +177,7 @@ def train(args):
         train_dataset_group,
         batch_size=1,
         shuffle=True,
-        collate_fn=collater,
+        collate_fn=collator,
         num_workers=n_workers,
         persistent_workers=args.persistent_data_loader_workers,
     )
diff --git a/train_network.py b/train_network.py
index 1a1713259..99179814c 100644
--- a/train_network.py
+++ b/train_network.py
@@ -192,8 +192,8 @@ def train(self, args):
 
         current_epoch = Value("i", 0)
         current_step = Value("i", 0)
-        ds_for_collater = train_dataset_group if args.max_data_loader_n_workers == 0 else None
-        collater = train_util.collater_class(current_epoch, current_step, ds_for_collater)
+        ds_for_collator = train_dataset_group if args.max_data_loader_n_workers == 0 else None
+        collator = train_util.collator_class(current_epoch, current_step, ds_for_collator)
 
         if args.debug_dataset:
             train_util.debug_dataset(train_dataset_group)
@@ -342,7 +342,7 @@ def train(self, args):
             train_dataset_group,
             batch_size=1,
             shuffle=True,
-            collate_fn=collater,
+            collate_fn=collator,
             num_workers=n_workers,
             persistent_workers=args.persistent_data_loader_workers,
         )
@@ -954,7 +954,7 @@ def setup_parser() -> argparse.ArgumentParser:
         help="Drops neurons out of training every step (0 or None is default behavior (no dropout), 1 would drop all neurons) / 訓練時に毎ステップでニューロンをdropする(0またはNoneはdropoutなし、1は全ニューロンをdropout)",
     )
     parser.add_argument(
-        "--network_args", type=str, default=None, nargs="*", help="additional argmuments for network (key=value) / ネットワークへの追加の引数"
+        "--network_args", type=str, default=None, nargs="*", help="additional arguments for network (key=value) / ネットワークへの追加の引数"
     )
     parser.add_argument("--network_train_unet_only", action="store_true", help="only training U-Net part / U-Net関連部分のみ学習する")
     parser.add_argument(
diff --git a/train_textual_inversion.py b/train_textual_inversion.py
index 1c7b7fcb2..252add536 100644
--- a/train_textual_inversion.py
+++ b/train_textual_inversion.py
@@ -312,8 +312,8 @@ def train(self, args):
 
         current_epoch = Value("i", 0)
         current_step = Value("i", 0)
-        ds_for_collater = train_dataset_group if args.max_data_loader_n_workers == 0 else None
-        collater = train_util.collater_class(current_epoch, current_step, ds_for_collater)
+        ds_for_collator = train_dataset_group if args.max_data_loader_n_workers == 0 else None
+        collator = train_util.collator_class(current_epoch, current_step, ds_for_collator)
 
         # make captions: tokenstring tokenstring1 tokenstring2 ...tokenstringn という文字列に書き換える超乱暴な実装
         if use_template:
@@ -389,7 +389,7 @@
             train_dataset_group,
             batch_size=1,
             shuffle=True,
-            collate_fn=collater,
+            collate_fn=collator,
             num_workers=n_workers,
             persistent_workers=args.persistent_data_loader_workers,
         )
diff --git a/train_textual_inversion_XTI.py b/train_textual_inversion_XTI.py
index 2c5673be1..525e612f1 100644
--- a/train_textual_inversion_XTI.py
+++ b/train_textual_inversion_XTI.py
@@ -236,8 +236,8 @@ def train(args):
     train_dataset_group.enable_XTI(XTI_layers, token_strings=token_strings)
     current_epoch = Value("i", 0)
     current_step = Value("i", 0)
-    ds_for_collater = train_dataset_group if args.max_data_loader_n_workers == 0 else None
-    collater = train_util.collater_class(current_epoch, current_step, ds_for_collater)
+    ds_for_collator = train_dataset_group if args.max_data_loader_n_workers == 0 else None
+    collator = train_util.collator_class(current_epoch, current_step, ds_for_collator)
 
     # make captions: tokenstring tokenstring1 tokenstring2 ...tokenstringn という文字列に書き換える超乱暴な実装
     if use_template:
@@ -309,7 +309,7 @@
         train_dataset_group,
         batch_size=1,
         shuffle=True,
-        collate_fn=collater,
+        collate_fn=collator,
         num_workers=n_workers,
         persistent_workers=args.persistent_data_loader_workers,
    )
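
Note: the `collater` -> `collator` rename repeats the same two-line hunk in every training script because they all build the collator identically and hand it to a PyTorch DataLoader. A minimal sketch of that shared pattern, pieced together from the hunks above; `train_dataset_group`, `args`, and `n_workers` are placeholders for values each script's train() computes earlier, and nothing of `collator_class` beyond the constructor visible in this diff is assumed:

    from multiprocessing import Value

    import torch

    from library import train_util

    # Shared counters (multiprocessing.Value): the training loop advances them,
    # and the collator reads them so the dataset sees the current epoch/step
    # even when collation runs in DataLoader worker processes.
    current_epoch = Value("i", 0)
    current_step = Value("i", 0)

    # The dataset is handed to the collator only when loading stays in-process
    # (max_data_loader_n_workers == 0); otherwise None is passed.
    ds_for_collator = train_dataset_group if args.max_data_loader_n_workers == 0 else None
    collator = train_util.collator_class(current_epoch, current_step, ds_for_collator)

    train_dataloader = torch.utils.data.DataLoader(
        train_dataset_group,
        batch_size=1,
        shuffle=True,
        collate_fn=collator,  # the renamed class is referenced only here
        num_workers=n_workers,
        persistent_workers=args.persistent_data_loader_workers,
    )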