Need help: I've reinstalled many times but still can't train a LoRA. Full traceback below:
E:\kohya\kohya_ss\train_network.py:974 in │
│ │
│ 971 │ args = train_util.read_config_from_file(args, parser) │
│ 972 │ │
│ 973 │ trainer = NetworkTrainer() │
│ ❱ 974 │ trainer.train(args) │
│ 975 │
│ │
│ E:\kohya\kohya_ss\train_network.py:250 in train │
│ │
│ 247 │ │ │ vae.requires_grad_(False) │
│ 248 │ │ │ vae.eval() │
│ 249 │ │ │ with torch.no_grad(): │
│ ❱ 250 │ │ │ │ train_dataset_group.cache_latents(vae, args.vae_batch_size, args.cache_l │
│ 251 │ │ │ vae.to("cpu") │
│ 252 │ │ │ if torch.cuda.is_available(): │
│ 253 │ │ │ │ torch.cuda.empty_cache() │
│ │
│ E:\kohya\kohya_ss\library\train_util.py:1801 in cache_latents │
│ │
│ 1798 │ def cache_latents(self, vae, vae_batch_size=1, cache_to_disk=False, is_main_process= │
│ 1799 │ │ for i, dataset in enumerate(self.datasets): │
│ 1800 │ │ │ print(f"[Dataset {i}]") │
│ ❱ 1801 │ │ │ dataset.cache_latents(vae, vae_batch_size, cache_to_disk, is_main_process) │
│ 1802 │ │
│ 1803 │ def cache_text_encoder_outputs( │
│ 1804 │ │ self, tokenizers, text_encoders, device, weight_dtype, cache_to_disk=False, is_m │
│ │
│ E:\kohya\kohya_ss\library\train_util.py:853 in cache_latents │
│ │
│ 850 │ │ # iterate batches: batch doesn't have image, image will be loaded in cache_batch │
│ 851 │ │ print("caching latents...") │
│ 852 │ │ for batch in tqdm(batches, smoothing=1, total=len(batches)): │
│ ❱ 853 │ │ │ cache_batch_latents(vae, cache_to_disk, batch, subset.flip_aug, subset.rando │
│ 854 │ │
│ 855 │ # If weight_dtype is specified, the Text Encoder itself and its output become weight_d │
│ 856 │ # Only effective for SDXL, but it needs to be a dataset method, so in sdxl_train_util. │
│ │
│ E:\kohya\kohya_ss\library\train_util.py:2124 in cache_batch_latents │
│ │
│ 2121 │ img_tensors = img_tensors.to(device=vae.device, dtype=vae.dtype) │
│ 2122 │ │
│ 2123 │ with torch.no_grad(): │
│ ❱ 2124 │ │ latents = vae.encode(img_tensors).latent_dist.sample().to("cpu") │
│ 2125 │ │
│ 2126 │ if flip_aug: │
│ 2127 │ │ img_tensors = torch.flip(img_tensors, dims=[3]) │
│ │
│ E:\kohya\kohya_ss\venv\lib\site-packages\diffusers\utils\accelerate_utils.py:46 in wrapper │
│ │
│ 43 │ def wrapper(self, *args, **kwargs): │
│ 44 │ │ if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"): │
│ 45 │ │ │ self._hf_hook.pre_forward(self) │
│ ❱ 46 │ │ return method(self, *args, **kwargs) │
│ 47 │ │
│ 48 │ return wrapper │
│ 49 │
│ │
│ E:\kohya\kohya_ss\venv\lib\site-packages\diffusers\models\autoencoder_kl.py:236 in encode │
│ │
│ 233 │ │ │ encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)] │
│ 234 │ │ │ h = torch.cat(encoded_slices) │
│ 235 │ │ else: │
│ ❱ 236 │ │ │ h = self.encoder(x) │
│ 237 │ │ │
│ 238 │ │ moments = self.quant_conv(h) │
│ 239 │ │ posterior = DiagonalGaussianDistribution(moments) │
│ │
│ E:\kohya\kohya_ss\venv\lib\site-packages\torch\nn\modules\module.py:1130 in _call_impl │
│ │
│ 1127 │ │ # this function, and just call forward. │
│ 1128 │ │ if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks o │
│ 1129 │ │ │ │ or _global_forward_hooks or _global_forward_pre_hooks): │
│ ❱ 1130 │ │ │ return forward_call(*input, **kwargs) │
│ 1131 │ │ # Do not call functions when jit is used │
│ 1132 │ │ full_backward_hooks, non_full_backward_hooks = [], [] │
│ 1133 │ │ if self._backward_hooks or _global_backward_hooks: │
│ │
│ E:\kohya\kohya_ss\venv\lib\site-packages\diffusers\models\vae.py:142 in forward │
│ │
│ 139 │ │ │ │ sample = down_block(sample) │
│ 140 │ │ │ │
│ 141 │ │ │ # middle │
│ ❱ 142 │ │ │ sample = self.mid_block(sample) │
│ 143 │ │ │
│ 144 │ │ # post-process │
│ 145 │ │ sample = self.conv_norm_out(sample) │
│ │
│ E:\kohya\kohya_ss\venv\lib\site-packages\torch\nn\modules\module.py:1130 in _call_impl │
│ │
│ 1127 │ │ # this function, and just call forward. │
│ 1128 │ │ if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks o │
│ 1129 │ │ │ │ or _global_forward_hooks or _global_forward_pre_hooks): │
│ ❱ 1130 │ │ │ return forward_call(*input, **kwargs) │
│ 1131 │ │ # Do not call functions when jit is used │
│ 1132 │ │ full_backward_hooks, non_full_backward_hooks = [], [] │
│ 1133 │ │ if self._backward_hooks or _global_backward_hooks: │
│ │
│ E:\kohya\kohya_ss\venv\lib\site-packages\diffusers\models\unet_2d_blocks.py:511 in forward │
│ │
│ 508 │ │ hidden_states = self.resnets[0](hidden_states, temb) │
│ 509 │ │ for attn, resnet in zip(self.attentions, self.resnets[1:]): │
│ 510 │ │ │ if attn is not None: │
│ ❱ 511 │ │ │ │ hidden_states = attn(hidden_states, temb=temb) │
│ 512 │ │ │ hidden_states = resnet(hidden_states, temb) │
│ 513 │ │ │
│ 514 │ │ return hidden_states │
│ │
│ E:\kohya\kohya_ss\venv\lib\site-packages\torch\nn\modules\module.py:1130 in _call_impl │
│ │
│ 1127 │ │ # this function, and just call forward. │
│ 1128 │ │ if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks o │
│ 1129 │ │ │ │ or _global_forward_hooks or _global_forward_pre_hooks): │
│ ❱ 1130 │ │ │ return forward_call(*input, **kwargs) │
│ 1131 │ │ # Do not call functions when jit is used │
│ 1132 │ │ full_backward_hooks, non_full_backward_hooks = [], [] │
│ 1133 │ │ if self._backward_hooks or _global_backward_hooks: │
│ │
│ E:\kohya\kohya_ss\venv\lib\site-packages\diffusers\models\attention_processor.py:321 in forward │
│ │
│ 318 │ │ # The `Attention` class can call different attention processors / attention func │
│ 319 │ │ # here we simply pass along all tensors to the selected processor class │
│ 320 │ │ # For standard processors that are defined here, `**cross_attention_kwargs` is e │
│ ❱ 321 │ │ return self.processor( │
│ 322 │ │ │ self, │
│ 323 │ │ │ hidden_states, │
│ 324 │ │ │ encoder_hidden_states=encoder_hidden_states, │
│ │
│ E:\kohya\kohya_ss\venv\lib\site-packages\diffusers\models\attention_processor.py:1046 in │
│ __call__ │
│ │
│ 1043 │ │ key = attn.head_to_batch_dim(key).contiguous() │
│ 1044 │ │ value = attn.head_to_batch_dim(value).contiguous() │
│ 1045 │ │ │
│ ❱ 1046 │ │ hidden_states = xformers.ops.memory_efficient_attention( │
│ 1047 │ │ │ query, key, value, attn_bias=attention_mask, op=self.attention_op, scale=att │
│ 1048 │ │ ) │
│ 1049 │ │ hidden_states = hidden_states.to(query.dtype) │
╰──────────────────────────────────────────────────────────────────────────────────────────────────╯
TypeError: memory_efficient_attention() got an unexpected keyword argument 'scale'
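That TypeError is the real failure; the second traceback below is just the accelerate launcher reporting that the child training process exited with an error. The installed diffusers passes a scale= keyword to xformers.ops.memory_efficient_attention, but the xformers build in the venv is older and does not accept that keyword, so the two packages are out of step. A quick check you can run inside the venv (a diagnostic sketch, not part of kohya_ss):

    import inspect

    import xformers
    import xformers.ops

    # Report the installed xformers version and whether its
    # memory_efficient_attention signature includes the `scale`
    # keyword that newer diffusers versions pass in.
    print("xformers version:", xformers.__version__)
    sig = inspect.signature(xformers.ops.memory_efficient_attention)
    print("accepts 'scale':", "scale" in sig.parameters)

If the last line prints False, the xformers install predates the scale argument and this exact TypeError is expected.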
╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮
│ C:\Users\AppData\Local\Programs\Python\Python310\lib\runpy.py:196 in │
│ _run_module_as_main │
│ │
│ 193 │ main_globals = sys.modules["__main__"].__dict__ │
│ 194 │ if alter_argv: │
│ 195 │ │ sys.argv[0] = mod_spec.origin │
│ ❱ 196 │ return _run_code(code, main_globals, None, │
│ 197 │ │ │ │ │ "__main__", mod_spec) │
│ 198 │
│ 199 def run_module(mod_name, init_globals=None, │
│ │
│ C:\Users\a\AppData\Local\Programs\Python\Python310\lib\runpy.py:86 in _run_code │
│ │
│ 83 │ │ │ │ │ loader = loader, │
│ 84 │ │ │ │ │ package = pkg_name, │
│ 85 │ │ │ │ │ spec = mod_spec) │
│ ❱ 86 │ exec(code, run_globals) │
│ 87 │ return run_globals │
│ 88 │
│ 89 def _run_module_code(code, init_globals=None, │
│ │
│ in <module>:7 │
│ │
│ 4 from accelerate.commands.accelerate_cli import main │
│ 5 if __name__ == '__main__': │
│ 6 │ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) │
│ ❱ 7 │ sys.exit(main()) │
│ 8 │
│ │
│ E:\kohya\kohya_ss\venv\lib\site-packages\accelerate\commands\accelerate_cli.py:45 in main │
│ │
│ 42 │ │ exit(1) │
│ 43 │ │
│ 44 │ # Run │
│ ❱ 45 │ args.func(args) │
│ 46 │
│ 47 │
│ 48 if __name__ == "__main__": │
│ │
│ E:\kohya\kohya_ss\venv\lib\site-packages\accelerate\commands\launch.py:918 in launch_command │
│ │
│ 915 │ elif defaults is not None and defaults.compute_environment == ComputeEnvironment.AMA │
│ 916 │ │ sagemaker_launcher(defaults, args) │
│ 917 │ else: │
│ ❱ 918 │ │ simple_launcher(args) │
│ 919 │
│ 920 │
│ 921 def main(): │
│ │
│ E:\kohya\kohya_ss\venv\lib\site-packages\accelerate\commands\launch.py:580 in simple_launcher │
│ │
│ 577 │ process.wait() │
│ 578 │ if process.returncode != 0: │
│ 579 │ │ if not args.quiet: │
│ ❱ 580 │ │ │ raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd) │
│ 581 │ │ else: │
│ 582 │ │ │ sys.exit(1) │
│ 583 │
╰──────────────────────────────────────────────────────────────────────────────────────────────────╯
CalledProcessError: Command '['E:\\kohya\\kohya_ss\\venv\\Scripts\\python.exe', './train_network.py', '--enable_bucket',
'--min_bucket_reso=256', '--max_bucket_reso=2048', '--pretrained_model_name_or_path=E:/stable diffu
SSD/v1-5-pruned-emaonly.safetensors', '--train_data_dir=C:/Users/a/Desktop/ly/image',
'--resolution=512,512', '--output_dir=C:/Users/a/Desktop/ly/model',
'--logging_dir=C:/Users/a/Desktop/ly/log', '--network_alpha=1', '--save_model_as=safetensors',
'--network_module=networks.lora', '--text_encoder_lr=5e-05', '--unet_lr=0.0001', '--network_dim=8', '--output_name=ly',
'--lr_scheduler_num_cycles=1', '--no_half_vae', '--learning_rate=0.0001', '--lr_scheduler=cosine',
'--lr_warmup_steps=78', '--train_batch_size=2', '--max_train_steps=780', '--save_every_n_epochs=1',
'--mixed_precision=fp16', '--save_precision=fp16', '--cache_latents', '--optimizer_type=AdamW8bit',
'--max_data_loader_n_workers=0', '--bucket_reso_steps=64', '--mem_eff_attn', '--gradient_checkpointing', '--xformers',
'--bucket_no_upscale']' returned non-zero exit status 1.
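The usual fix is to bring xformers in line with the diffusers version kohya_ss installed, using the venv's own interpreter (upgrading to the latest is an assumption; safer is whatever pin the kohya_ss requirements file specifies, and note that a loose upgrade can also pull in a different torch build):

    E:\kohya\kohya_ss\venv\Scripts\python.exe -m pip install --upgrade xformers

Alternatively, removing the --xformers flag from the command above keeps diffusers from routing attention through memory_efficient_attention at all, at the cost of slower and more VRAM-hungry latent caching and training.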