Update typos to the latest version and add dependabot.yml #848

Merged 3 commits on Oct 1, 2023
7 changes: 7 additions & 0 deletions .github/dependabot.yml
@@ -0,0 +1,7 @@
+---
+version: 2
+updates:
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "monthly"
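
With this config in place, Dependabot scans the workflows under .github/workflows/ once a month and opens a PR whenever a pinned action (such as actions/checkout or crate-ci/typos below) has a newer release.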
2 changes: 1 addition & 1 deletion .github/workflows/typos.yml
@@ -18,4 +18,4 @@ jobs:
       - uses: actions/checkout@v3

      - name: typos-action
-        uses: crate-ci/typos@v1.13.10
+        uses: crate-ci/typos@v1.16.15
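
The dependabot.yml added above should keep this pin current from now on, so future typos releases no longer need a manual bump like this one.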
20 changes: 19 additions & 1 deletion _typos.toml
@@ -9,7 +9,25 @@ parms="parms"
 nin="nin"
 extention="extention" # Intentionally left
 nd="nd"
+shs="shs"
+sts="sts"
+scs="scs"
+cpc="cpc"
+coc="coc"
+cic="cic"
+msm="msm"
+usu="usu"
+ici="ici"
+lvl="lvl"
+dii="dii"
+muk="muk"
+ori="ori"
+hru="hru"
+rik="rik"
+koo="koo"
+yos="yos"
+wn="wn"


 [files]
-extend-exclude = ["_typos.toml"]
+extend-exclude = ["_typos.toml", "venv"]
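
Each identity mapping (e.g. shs="shs") tells typos to accept that token as valid, silencing a false positive; the new entries are presumably short identifiers and romanized fragments that the larger v1.16.15 dictionary would otherwise flag. Adding "venv" to extend-exclude keeps the checker out of locally created virtual environments. The typos CLI discovers _typos.toml automatically when run from the repository root.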
6 changes: 3 additions & 3 deletions fine_tune.py
@@ -80,8 +80,8 @@ def train(args):

     current_epoch = Value("i", 0)
     current_step = Value("i", 0)
-    ds_for_collater = train_dataset_group if args.max_data_loader_n_workers == 0 else None
-    collater = train_util.collater_class(current_epoch, current_step, ds_for_collater)
+    ds_for_collator = train_dataset_group if args.max_data_loader_n_workers == 0 else None
+    collator = train_util.collator_class(current_epoch, current_step, ds_for_collator)

     if args.debug_dataset:
         train_util.debug_dataset(train_dataset_group)
@@ -208,7 +208,7 @@ def fn_recursive_set_mem_eff(module: torch.nn.Module):
         train_dataset_group,
         batch_size=1,
         shuffle=True,
-        collate_fn=collater,
+        collate_fn=collator,
         num_workers=n_workers,
         persistent_workers=args.persistent_data_loader_workers,
     )
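
The collater → collator rename (collator being the standard spelling for a collate_fn helper) repeats mechanically in each training script below; the class itself is renamed in library/train_util.py, and a sketch of the pattern follows that file's diff.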
6 changes: 3 additions & 3 deletions gen_img_diffusers.py
@@ -3364,7 +3364,7 @@ def setup_parser() -> argparse.ArgumentParser:
     )
     parser.add_argument("--network_mul", type=float, default=None, nargs="*", help="additional network multiplier / 追加ネットワークの効果の倍率")
     parser.add_argument(
-        "--network_args", type=str, default=None, nargs="*", help="additional argmuments for network (key=value) / ネットワークへの追加の引数"
+        "--network_args", type=str, default=None, nargs="*", help="additional arguments for network (key=value) / ネットワークへの追加の引数"
     )
     parser.add_argument("--network_show_meta", action="store_true", help="show metadata of network model / ネットワークモデルのメタデータを表示する")
     parser.add_argument("--network_merge", action="store_true", help="merge network weights to original model / ネットワークの重みをマージする")
@@ -3390,7 +3390,7 @@ def setup_parser() -> argparse.ArgumentParser:
         "--max_embeddings_multiples",
         type=int,
         default=None,
-        help="max embeding multiples, max token length is 75 * multiples / トークン長をデフォルトの何倍とするか 75*この値 がトークン長となる",
+        help="max embedding multiples, max token length is 75 * multiples / トークン長をデフォルトの何倍とするか 75*この値 がトークン長となる",
     )
     parser.add_argument(
         "--clip_guidance_scale",
@@ -3449,7 +3449,7 @@ def setup_parser() -> argparse.ArgumentParser:
         "--highres_fix_upscaler_args",
         type=str,
         default=None,
-        help="additional argmuments for upscaler (key=value) / upscalerへの追加の引数",
+        help="additional arguments for upscaler (key=value) / upscalerへの追加の引数",
     )
     parser.add_argument(
         "--highres_fix_disable_control_net",
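
As a usage note, --network_args takes space-separated key=value pairs that are passed through to the loaded network module, so the valid keys depend on that module. An illustrative invocation (other flags elided; the keys here are assumptions for a networks.lora module, not a definitive list):

python gen_img_diffusers.py ... --network_module networks.lora --network_weights my_lora.safetensors --network_args "conv_dim=4" "conv_alpha=1"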
2 changes: 1 addition & 1 deletion library/original_unet.py
@@ -131,7 +131,7 @@
 UP_BLOCK_TYPES = ["UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"]


-# region memory effcient attention
+# region memory efficient attention

 # FlashAttentionを使うCrossAttention
 # based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py
2 changes: 1 addition & 1 deletion library/sdxl_original_unet.py
@@ -41,7 +41,7 @@

 USE_REENTRANT = True

-# region memory effcient attention
+# region memory efficient attention

 # FlashAttentionを使うCrossAttention
 # based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py
2 changes: 1 addition & 1 deletion library/train_util.py
@@ -4658,7 +4658,7 @@ def __getitem__(self, idx):


 # collate_fn用 epoch,stepはmultiprocessing.Value
-class collater_class:
+class collator_class:
     def __init__(self, epoch, step, dataset):
         self.current_epoch = epoch
         self.current_step = step
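
For context, the Japanese comment above reads "for collate_fn; epoch and step are multiprocessing.Value". A callable class is used so shared, mutable training state can ride along into the DataLoader's collate_fn. Below is a minimal sketch of the pattern only, not the real train_util implementation; the set_current_* setter names and the pass-through return are assumptions, suggested by the batch_size=1 DataLoaders elsewhere in this PR.

from multiprocessing import Value

class collator_sketch:
    """Callable collate_fn carrying shared training state (sketch only)."""

    def __init__(self, epoch, step, dataset):
        self.current_epoch = epoch  # shared multiprocessing.Value("i", 0)
        self.current_step = step    # shared multiprocessing.Value("i", 0)
        self.dataset = dataset      # non-None only when max_data_loader_n_workers == 0

    def __call__(self, examples):
        if self.dataset is not None:
            # in-process loading: push the current position straight into the dataset;
            # set_current_epoch/set_current_step are hypothetical setter names here
            self.dataset.set_current_epoch(self.current_epoch.value)
            self.dataset.set_current_step(self.current_step.value)
        # the dataset groups in these scripts already yield complete batches,
        # which is why every DataLoader above uses batch_size=1 and a pass-through
        return examples[0]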
6 changes: 3 additions & 3 deletions sdxl_gen_img.py
@@ -2612,7 +2612,7 @@ def setup_parser() -> argparse.ArgumentParser:
     )
     parser.add_argument("--network_mul", type=float, default=None, nargs="*", help="additional network multiplier / 追加ネットワークの効果の倍率")
     parser.add_argument(
-        "--network_args", type=str, default=None, nargs="*", help="additional argmuments for network (key=value) / ネットワークへの追加の引数"
+        "--network_args", type=str, default=None, nargs="*", help="additional arguments for network (key=value) / ネットワークへの追加の引数"
     )
     parser.add_argument("--network_show_meta", action="store_true", help="show metadata of network model / ネットワークモデルのメタデータを表示する")
     parser.add_argument("--network_merge", action="store_true", help="merge network weights to original model / ネットワークの重みをマージする")
@@ -2631,7 +2631,7 @@ def setup_parser() -> argparse.ArgumentParser:
         "--max_embeddings_multiples",
         type=int,
         default=None,
-        help="max embeding multiples, max token length is 75 * multiples / トークン長をデフォルトの何倍とするか 75*この値 がトークン長となる",
+        help="max embedding multiples, max token length is 75 * multiples / トークン長をデフォルトの何倍とするか 75*この値 がトークン長となる",
     )
     parser.add_argument(
         "--guide_image_path", type=str, default=None, nargs="*", help="image to CLIP guidance / CLIP guided SDでガイドに使う画像"
@@ -2666,7 +2666,7 @@ def setup_parser() -> argparse.ArgumentParser:
         "--highres_fix_upscaler_args",
         type=str,
         default=None,
-        help="additional argmuments for upscaler (key=value) / upscalerへの追加の引数",
+        help="additional arguments for upscaler (key=value) / upscalerへの追加の引数",
     )
     parser.add_argument(
         "--highres_fix_disable_control_net",
2 changes: 1 addition & 1 deletion sdxl_minimal_inference.py
@@ -101,7 +101,7 @@ def get_timestep_embedding(x, outdim):
         type=str,
         nargs="*",
         default=[],
-        help="LoRA weights, only supports networks.lora, each arguement is a `path;multiplier` (semi-colon separated)",
+        help="LoRA weights, only supports networks.lora, each argument is a `path;multiplier` (semi-colon separated)",
     )
     parser.add_argument("--interactive", action="store_true")
     args = parser.parse_args()
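
For example, two LoRA files applied at different strengths, following the `path;multiplier` format the help text documents (paths illustrative, other flags elided):

python sdxl_minimal_inference.py ... --lora_weights "lora_a.safetensors;1.0" "lora_b.safetensors;0.5"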
6 changes: 3 additions & 3 deletions sdxl_train.py
@@ -172,8 +172,8 @@ def train(args):

     current_epoch = Value("i", 0)
     current_step = Value("i", 0)
-    ds_for_collater = train_dataset_group if args.max_data_loader_n_workers == 0 else None
-    collater = train_util.collater_class(current_epoch, current_step, ds_for_collater)
+    ds_for_collator = train_dataset_group if args.max_data_loader_n_workers == 0 else None
+    collator = train_util.collator_class(current_epoch, current_step, ds_for_collator)

     train_dataset_group.verify_bucket_reso_steps(32)

@@ -348,7 +348,7 @@ def fn_recursive_set_mem_eff(module: torch.nn.Module):
         train_dataset_group,
         batch_size=1,
         shuffle=True,
-        collate_fn=collater,
+        collate_fn=collator,
         num_workers=n_workers,
         persistent_workers=args.persistent_data_loader_workers,
     )
6 changes: 3 additions & 3 deletions sdxl_train_control_net_lllite.py
@@ -106,8 +106,8 @@ def train(args):

     current_epoch = Value("i", 0)
     current_step = Value("i", 0)
-    ds_for_collater = train_dataset_group if args.max_data_loader_n_workers == 0 else None
-    collater = train_util.collater_class(current_epoch, current_step, ds_for_collater)
+    ds_for_collator = train_dataset_group if args.max_data_loader_n_workers == 0 else None
+    collator = train_util.collator_class(current_epoch, current_step, ds_for_collator)

     train_dataset_group.verify_bucket_reso_steps(32)

@@ -245,7 +245,7 @@ def train(args):
         train_dataset_group,
         batch_size=1,
         shuffle=True,
-        collate_fn=collater,
+        collate_fn=collator,
         num_workers=n_workers,
         persistent_workers=args.persistent_data_loader_workers,
     )
6 changes: 3 additions & 3 deletions sdxl_train_control_net_lllite_old.py
@@ -102,8 +102,8 @@ def train(args):

     current_epoch = Value("i", 0)
     current_step = Value("i", 0)
-    ds_for_collater = train_dataset_group if args.max_data_loader_n_workers == 0 else None
-    collater = train_util.collater_class(current_epoch, current_step, ds_for_collater)
+    ds_for_collator = train_dataset_group if args.max_data_loader_n_workers == 0 else None
+    collator = train_util.collator_class(current_epoch, current_step, ds_for_collator)

     train_dataset_group.verify_bucket_reso_steps(32)

@@ -213,7 +213,7 @@ def train(args):
         train_dataset_group,
         batch_size=1,
         shuffle=True,
-        collate_fn=collater,
+        collate_fn=collator,
         num_workers=n_workers,
         persistent_workers=args.persistent_data_loader_workers,
     )
6 changes: 3 additions & 3 deletions tools/cache_latents.py
@@ -86,8 +86,8 @@ def cache_to_disk(args: argparse.Namespace) -> None:

     current_epoch = Value("i", 0)
     current_step = Value("i", 0)
-    ds_for_collater = train_dataset_group if args.max_data_loader_n_workers == 0 else None
-    collater = train_util.collater_class(current_epoch, current_step, ds_for_collater)
+    ds_for_collator = train_dataset_group if args.max_data_loader_n_workers == 0 else None
+    collator = train_util.collator_class(current_epoch, current_step, ds_for_collator)

     # acceleratorを準備する
     print("prepare accelerator")
@@ -120,7 +120,7 @@ def cache_to_disk(args: argparse.Namespace) -> None:
         train_dataset_group,
         batch_size=1,
         shuffle=True,
-        collate_fn=collater,
+        collate_fn=collator,
         num_workers=n_workers,
         persistent_workers=args.persistent_data_loader_workers,
     )
6 changes: 3 additions & 3 deletions tools/cache_text_encoder_outputs.py
@@ -91,8 +91,8 @@ def cache_to_disk(args: argparse.Namespace) -> None:

     current_epoch = Value("i", 0)
     current_step = Value("i", 0)
-    ds_for_collater = train_dataset_group if args.max_data_loader_n_workers == 0 else None
-    collater = train_util.collater_class(current_epoch, current_step, ds_for_collater)
+    ds_for_collator = train_dataset_group if args.max_data_loader_n_workers == 0 else None
+    collator = train_util.collator_class(current_epoch, current_step, ds_for_collator)

     # acceleratorを準備する
     print("prepare accelerator")
@@ -125,7 +125,7 @@ def cache_to_disk(args: argparse.Namespace) -> None:
         train_dataset_group,
         batch_size=1,
         shuffle=True,
-        collate_fn=collater,
+        collate_fn=collator,
         num_workers=n_workers,
         persistent_workers=args.persistent_data_loader_workers,
     )
6 changes: 3 additions & 3 deletions train_controlnet.py
@@ -98,8 +98,8 @@ def train(args):

     current_epoch = Value("i", 0)
     current_step = Value("i", 0)
-    ds_for_collater = train_dataset_group if args.max_data_loader_n_workers == 0 else None
-    collater = train_util.collater_class(current_epoch, current_step, ds_for_collater)
+    ds_for_collator = train_dataset_group if args.max_data_loader_n_workers == 0 else None
+    collator = train_util.collator_class(current_epoch, current_step, ds_for_collator)

     if args.debug_dataset:
         train_util.debug_dataset(train_dataset_group)
@@ -245,7 +245,7 @@ def train(args):
         train_dataset_group,
         batch_size=1,
         shuffle=True,
-        collate_fn=collater,
+        collate_fn=collator,
         num_workers=n_workers,
         persistent_workers=args.persistent_data_loader_workers,
     )
6 changes: 3 additions & 3 deletions train_db.py
@@ -78,8 +78,8 @@ def train(args):

     current_epoch = Value("i", 0)
     current_step = Value("i", 0)
-    ds_for_collater = train_dataset_group if args.max_data_loader_n_workers == 0 else None
-    collater = train_util.collater_class(current_epoch, current_step, ds_for_collater)
+    ds_for_collator = train_dataset_group if args.max_data_loader_n_workers == 0 else None
+    collator = train_util.collator_class(current_epoch, current_step, ds_for_collator)

     if args.no_token_padding:
         train_dataset_group.disable_token_padding()
@@ -177,7 +177,7 @@ def train(args):
         train_dataset_group,
         batch_size=1,
         shuffle=True,
-        collate_fn=collater,
+        collate_fn=collator,
         num_workers=n_workers,
         persistent_workers=args.persistent_data_loader_workers,
    )
8 changes: 4 additions & 4 deletions train_network.py
@@ -192,8 +192,8 @@ def train(self, args):

         current_epoch = Value("i", 0)
         current_step = Value("i", 0)
-        ds_for_collater = train_dataset_group if args.max_data_loader_n_workers == 0 else None
-        collater = train_util.collater_class(current_epoch, current_step, ds_for_collater)
+        ds_for_collator = train_dataset_group if args.max_data_loader_n_workers == 0 else None
+        collator = train_util.collator_class(current_epoch, current_step, ds_for_collator)

         if args.debug_dataset:
             train_util.debug_dataset(train_dataset_group)
@@ -342,7 +342,7 @@ def train(self, args):
             train_dataset_group,
             batch_size=1,
             shuffle=True,
-            collate_fn=collater,
+            collate_fn=collator,
             num_workers=n_workers,
             persistent_workers=args.persistent_data_loader_workers,
         )
@@ -954,7 +954,7 @@ def setup_parser() -> argparse.ArgumentParser:
         help="Drops neurons out of training every step (0 or None is default behavior (no dropout), 1 would drop all neurons) / 訓練時に毎ステップでニューロンをdropする(0またはNoneはdropoutなし、1は全ニューロンをdropout)",
     )
     parser.add_argument(
-        "--network_args", type=str, default=None, nargs="*", help="additional argmuments for network (key=value) / ネットワークへの追加の引数"
+        "--network_args", type=str, default=None, nargs="*", help="additional arguments for network (key=value) / ネットワークへの追加の引数"
    )
     parser.add_argument("--network_train_unet_only", action="store_true", help="only training U-Net part / U-Net関連部分のみ学習する")
     parser.add_argument(
6 changes: 3 additions & 3 deletions train_textual_inversion.py
@@ -312,8 +312,8 @@ def train(self, args):

         current_epoch = Value("i", 0)
         current_step = Value("i", 0)
-        ds_for_collater = train_dataset_group if args.max_data_loader_n_workers == 0 else None
-        collater = train_util.collater_class(current_epoch, current_step, ds_for_collater)
+        ds_for_collator = train_dataset_group if args.max_data_loader_n_workers == 0 else None
+        collator = train_util.collator_class(current_epoch, current_step, ds_for_collator)

         # make captions: tokenstring tokenstring1 tokenstring2 ...tokenstringn という文字列に書き換える超乱暴な実装
         if use_template:
@@ -389,7 +389,7 @@ def train(self, args):
             train_dataset_group,
             batch_size=1,
             shuffle=True,
-            collate_fn=collater,
+            collate_fn=collator,
             num_workers=n_workers,
             persistent_workers=args.persistent_data_loader_workers,
         )
6 changes: 3 additions & 3 deletions train_textual_inversion_XTI.py
@@ -236,8 +236,8 @@ def train(args):
     train_dataset_group.enable_XTI(XTI_layers, token_strings=token_strings)
     current_epoch = Value("i", 0)
     current_step = Value("i", 0)
-    ds_for_collater = train_dataset_group if args.max_data_loader_n_workers == 0 else None
-    collater = train_util.collater_class(current_epoch, current_step, ds_for_collater)
+    ds_for_collator = train_dataset_group if args.max_data_loader_n_workers == 0 else None
+    collator = train_util.collator_class(current_epoch, current_step, ds_for_collator)

     # make captions: tokenstring tokenstring1 tokenstring2 ...tokenstringn という文字列に書き換える超乱暴な実装
     if use_template:
@@ -309,7 +309,7 @@ def train(args):
         train_dataset_group,
         batch_size=1,
         shuffle=True,
-        collate_fn=collater,
+        collate_fn=collator,
         num_workers=n_workers,
         persistent_workers=args.persistent_data_loader_workers,
     )