Skip to content

Commit

Permalink
enable autoconvert
Browse files Browse the repository at this point in the history
New --autoconvert CLI option will scan a designated directory for
new .ckpt files, convert them into diffuser models, and import
them into models.yaml.

Works like this:

   invoke.py --autoconvert /path/to/weights/directory

In ModelCache added two new methods:

  autoconvert_weights(config_path, weights_directory_path, models_directory_path)
  convert_and_import(ckpt_path, diffuser_path)
  • Loading branch information
lstein committed Dec 23, 2022
1 parent 3a61258 commit b72c878
Show file tree
Hide file tree
Showing 4 changed files with 125 additions and 32 deletions.
46 changes: 18 additions & 28 deletions ldm/invoke/CLI.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
from ldm.invoke.image_util import make_grid
from ldm.invoke.log import write_log
from ldm.invoke.concepts_lib import HuggingFaceConceptsLibrary
from ldm.invoke.model_cache import ModelCache
from omegaconf import OmegaConf
from pathlib import Path
import pyparsing
Expand Down Expand Up @@ -126,6 +127,14 @@ def main():
emergency_model_reconfigure(opt)
sys.exit(-1)

# try to autoconvert new models
# autoimport new .ckpt files
if path := opt.autoconvert:
gen.model_cache.autoconvert_weights(
conf_path=opt.conf,
weights_directory=path,
)

# web server loops forever
if opt.web or opt.gui:
invoke_ai_web_server_loop(gen, gfpgan, codeformer, esrgan)
Expand Down Expand Up @@ -591,35 +600,16 @@ def add_weights_to_config(model_path:str, gen, opt, completer):
if write_config_file(opt.conf, gen, model_name, new_config, make_default=make_default):
completer.add_model(model_name)

def optimize_model(ckpt_path:str, gen, opt, completer):
    '''
    Convert a legacy .ckpt weights file into an optimized diffusers model
    directory and register it in models.yaml.

    :param ckpt_path: path to the legacy checkpoint file
    :param gen: generate object holding the model_cache used for conversion
    :param opt: parsed CLI options (supplies opt.conf, the models.yaml path)
    :param completer: readline completer, updated with the new model name
    '''
    ckpt_path = Path(ckpt_path)
    basename = ckpt_path.stem
    diffuser_path = Path(Globals.root, 'models','optimized-ckpts',basename)
    if diffuser_path.exists():
        print(f'** {basename} is already optimized. Will not overwrite.')
        return

    new_config = gen.model_cache.convert_and_import(ckpt_path, diffuser_path)
    # ROBUSTNESS: convert_and_import returns None when conversion fails or
    # the destination already exists; don't write a config entry for a
    # model that was never created.
    if new_config is None:
        return
    if write_config_file(opt.conf, gen, basename, new_config, clobber=False):
        completer.add_model(basename)

def del_config(model_name:str, gen, opt, completer):
current_model = gen.model_name
Expand Down
6 changes: 6 additions & 0 deletions ldm/invoke/args.py
Original file line number Diff line number Diff line change
Expand Up @@ -508,6 +508,12 @@ def _create_arg_parser(self):
default=False,
help='Check for and blur potentially NSFW images. Use --no-nsfw_checker to disable.',
)
model_group.add_argument(
'--autoconvert',
default=None,
type=str,
help='Check the indicated directory for .ckpt weights files at startup and import as optimized diffuser models',
)
model_group.add_argument(
'--patchmatch',
action=argparse.BooleanOptionalAction,
Expand Down
14 changes: 14 additions & 0 deletions ldm/invoke/globals.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@

import os
import os.path as osp
from pathlib import Path
from argparse import Namespace

Globals = Namespace()
Expand All @@ -27,9 +28,22 @@

# Where to look for the initialization file
Globals.initfile = 'invokeai.init'
# Subdirectories of Globals.root holding models, config files, and the
# directory scanned by --autoconvert for newly dropped .ckpt weights
Globals.models_dir = 'models'
Globals.config_dir = 'configs'
Globals.autoscan_dir = 'weights'

# Try loading patchmatch
Globals.try_patchmatch = True

# Use CPU even if GPU is available (main use case is for debugging MPS issues)
Globals.always_use_cpu = False

def global_config_dir()->Path:
    '''Return the path to the configs directory (Globals.root/Globals.config_dir).'''
    return Path(Globals.root, Globals.config_dir)

def global_models_dir()->Path:
    '''Return the path to the models directory (Globals.root/Globals.models_dir).'''
    return Path(Globals.root, Globals.models_dir)

def global_autoscan_dir()->Path:
    '''Return the path of the directory scanned for new .ckpt weights (Globals.root/Globals.autoscan_dir).'''
    return Path(Globals.root, Globals.autoscan_dir)

91 changes: 87 additions & 4 deletions ldm/invoke/model_cache.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@

import torch
import transformers
from diffusers import AutoencoderKL
from diffusers import AutoencoderKL, logging as dlogging
from huggingface_hub import hf_hub_download
from huggingface_hub.utils import RevisionNotFoundError
from omegaconf import OmegaConf
Expand All @@ -30,7 +30,7 @@
from picklescan.scanner import scan_file_path

from ldm.invoke.generator.diffusers_pipeline import StableDiffusionGeneratorPipeline
from ldm.invoke.globals import Globals
from ldm.invoke.globals import Globals, global_config_dir, global_models_dir, global_autoscan_dir
from ldm.util import instantiate_from_config, ask_user

DEFAULT_MAX_MODELS=2
Expand Down Expand Up @@ -96,7 +96,7 @@ def get_model(self, model_name:str):

except Exception as e:
print(f'** model {model_name} could not be loaded: {str(e)}')
print(traceback.format_exc())
traceback.print_exc()
assert self.current_model,'** FATAL: no current model to restore to'
print(f'** restoring {self.current_model}')
self.get_model(self.current_model)
Expand Down Expand Up @@ -366,6 +366,9 @@ def _load_diffusers_model(self, mconfig):
else:
fp_args_list = [{}]

verbosity = dlogging.get_verbosity()
dlogging.set_verbosity_error()

pipeline = None
for fp_args in fp_args_list:
try:
Expand All @@ -374,6 +377,7 @@ def _load_diffusers_model(self, mconfig):
**pipeline_args,
**fp_args,
)

except OSError as e:
if str(e).startswith('fp16 is not a valid'):
print(f'Could not fetch half-precision version of model {repo_id}; fetching full-precision instead')
Expand All @@ -382,6 +386,7 @@ def _load_diffusers_model(self, mconfig):
if pipeline:
break

dlogging.set_verbosity_error()
assert pipeline is not None, OSError(f'"{model_name}" could not be loaded')

pipeline.to(self.device)
Expand Down Expand Up @@ -430,6 +435,10 @@ def offload_model(self, model_name:str) -> None:
torch.cuda.empty_cache()

def scan_model(self, model_name, checkpoint):
'''
Apply picklescanner to the indicated checkpoint and issue a warning
and option to exit if an infected file is identified.
'''
# scan model
print(f'>> Scanning Model: {model_name}')
scan_result = scan_file_path(checkpoint)
Expand All @@ -448,7 +457,81 @@ def scan_model(self, model_name, checkpoint):
print("### Exiting InvokeAI")
sys.exit()
else:
print('>> Model Scanned. OK!!')
print('>> Model scanned ok!')

def autoconvert_weights(
        self,
        conf_path:Path,
        weights_directory:Path=None,
        dest_directory:Path=None,
):
    '''
    Scan weights_directory for .ckpt files that have not yet been
    converted, convert each into a diffusers model under dest_directory,
    import them, and commit the updated config to conf_path.

    :param conf_path: path of the models.yaml file to commit to
    :param weights_directory: directory to scan; defaults to global_autoscan_dir()
    :param dest_directory: destination for converted models; defaults to
        <models dir>/optimized-ckpts
    '''
    weights_directory = weights_directory or global_autoscan_dir()
    dest_directory = dest_directory or Path(global_models_dir(), 'optimized-ckpts')

    # BUG FIX: the original used a plain string literal here, so the text
    # "{weights_directory}" was printed verbatim instead of the path.
    print(f'>> Checking for unconverted .ckpt files in {weights_directory}')
    ckpt_files = dict()
    for root, dirs, files in os.walk(weights_directory):
        for f in files:
            if not f.endswith('.ckpt'):
                continue
            basename = Path(f).stem
            dest = Path(dest_directory, basename)
            # only queue checkpoints whose converted destination is absent
            if not dest.exists():
                ckpt_files[Path(root, f)] = dest

    if len(ckpt_files) == 0:
        return

    print(f'>> New .ckpt file(s) found in {weights_directory}. Optimizing and importing...')
    for ckpt in ckpt_files:
        self.convert_and_import(ckpt, ckpt_files[ckpt])
    self.commit(conf_path)

def convert_and_import(self, ckpt_path:Path, diffuser_path:Path)->dict:
    '''
    Convert a legacy .ckpt weights file into a diffusers model directory
    and register it in the in-memory models config.

    :param ckpt_path: path to the legacy checkpoint file
    :param diffuser_path: destination directory for the converted model
    :return: the new config dict on success; None if the destination
        already exists or the conversion failed
    '''
    if diffuser_path.exists():
        print(f'ERROR: The path {str(diffuser_path)} already exists. Please move or remove it and try again.')
        return None

    # imports deferred until we know there is work to do
    from ldm.invoke.ckpt_to_diffuser import convert_ckpt_to_diffuser
    import transformers

    print(f'>> {ckpt_path.name}: optimizing (30-60s).')
    # BUG FIX: new_config must be initialized before the try block;
    # previously a failure inside convert_ckpt_to_diffuser caused a
    # NameError at `return new_config` instead of returning cleanly.
    new_config = None
    try:
        model_name = diffuser_path.name
        # silence transformers' logging during the conversion, then restore it
        verbosity = transformers.logging.get_verbosity()
        transformers.logging.set_verbosity_error()
        convert_ckpt_to_diffuser(ckpt_path, diffuser_path)
        transformers.logging.set_verbosity(verbosity)
        print(f'>> Success. Optimized model is now located at {str(diffuser_path)}')
        print(f'>> Writing new config file entry for {model_name}...', end='')
        new_config = dict(
            path=str(diffuser_path),
            description=f'Optimized version of {model_name}',
            format='diffusers',
        )
        self.add_model(model_name, new_config, True)
        print('done.')
    except Exception as e:
        print(f'** Conversion failed: {str(e)}')
        traceback.print_exc()
    return new_config

def del_config(model_name:str, gen, opt, completer):
    '''
    Remove model_name from the model cache and commit the change to the
    models config file, refusing to delete the currently active model.

    NOTE(review): this looks like a duplicate of CLI.py's del_config that
    was accidentally placed in model_cache.py — confirm it belongs here.
    '''
    if gen.model_name == model_name:
        print("** Can't delete active model. !switch to another model first. **")
        return
    cache = gen.model_cache
    cache.del_model(model_name)
    cache.commit(opt.conf)
    print(f'** {model_name} deleted')
    completer.del_model(model_name)

def _make_cache_room(self) -> None:
num_loaded_models = len(self.models)
Expand Down

0 comments on commit b72c878

Please sign in to comment.