Make sure --free_gpu_mem still works when using CKPT-based diffuser model (#2367)

This PR attempts to fix the `--free_gpu_mem` option, which stopped working with
the CKPT-based diffuser model after #1583.

I noticed that, after #1583, memory usage no longer decreased after
generating an image even when the `--free_gpu_mem` option was enabled.
It turns out the option was never propagated into the `Generator`
instance, so generation always ran without the memory-saving
procedure.
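
Roughly, the fix forwards the flag from the top-level `Generate` object into the
generator's `generate()` call (the real change is in the `ldm/generate.py` diff
below). Here is a minimal, self-contained sketch of the pattern; the classes are
toy stand-ins, not the actual InvokeAI implementations:

```python
class Generator:
    """Toy stand-in for ldm.invoke.generator.base.Generator."""

    def generate(self, prompt: str, free_gpu_mem: bool = False, **kwargs):
        # The flag has to arrive here; before this fix it silently
        # defaulted to False, so the memory-saving path never ran.
        self.free_gpu_mem = free_gpu_mem
        print(f"generating {prompt!r} with free_gpu_mem={self.free_gpu_mem}")


class Generate:
    """Toy stand-in for ldm.generate.Generate."""

    def __init__(self, free_gpu_mem: bool = False):
        self.free_gpu_mem = free_gpu_mem

    def process_image(self, prompt: str):
        generator = Generator()
        # The fix: forward the option instead of dropping it.
        generator.generate(prompt, free_gpu_mem=self.free_gpu_mem)


Generate(free_gpu_mem=True).process_image("a lighthouse at dusk")
```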

This PR is also related to #2326. Initially, I was trying to make
`--free_gpu_mem` work with the 🤗 diffusers model as well.
In the process, I noticed that InvokeAI raises an exception when
`--free_gpu_mem` is enabled for that model.
As a quick fix, the exception is simply ignored and a warning message is
printed to the user's console.
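
The quick fix wraps the offload check in a `try`/`except AttributeError` (see the
`ldm/generate.py` hunk below). A minimal sketch of the same guard, with a
hypothetical helper name and a dummy model object instead of a real pipeline:

```python
def maybe_sync_cond_stage(model, free_gpu_mem: bool) -> None:
    # CKPT-based models expose .cond_stage_model; diffusers-based models do
    # not, so accessing it raises AttributeError and we only warn instead of
    # aborting the generation.
    try:
        if free_gpu_mem and model.cond_stage_model.device != model.device:
            model.cond_stage_model.device = model.device
            model.cond_stage_model.to(model.device)
    except AttributeError:
        print(">> Warning: '--free_gpu_mem' is not yet supported for "
              "models based on HuggingFace Diffusers.")


class FakeDiffusersModel:
    """Dummy object with no cond_stage_model, like a diffusers pipeline."""
    device = "cpu"


maybe_sync_cond_stage(FakeDiffusersModel(), free_gpu_mem=True)  # prints the warning
```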
lstein authored Jan 24, 2023
2 parents 66babb2 + 10c3afe commit 884768c
Showing 3 changed files with 13 additions and 5 deletions.
14 changes: 9 additions & 5 deletions ldm/generate.py
@@ -146,7 +146,7 @@ def __init__(
 gfpgan=None,
 codeformer=None,
 esrgan=None,
-free_gpu_mem=False,
+free_gpu_mem: bool=False,
 safety_checker:bool=False,
 max_loaded_models:int=2,
 # these are deprecated; if present they override values in the conf file
@@ -460,10 +460,13 @@ def process_image(image,seed):
 init_image = None
 mask_image = None
 
-
-if self.free_gpu_mem and self.model.cond_stage_model.device != self.model.device:
-    self.model.cond_stage_model.device = self.model.device
-    self.model.cond_stage_model.to(self.model.device)
+try:
+    if self.free_gpu_mem and self.model.cond_stage_model.device != self.model.device:
+        self.model.cond_stage_model.device = self.model.device
+        self.model.cond_stage_model.to(self.model.device)
+except AttributeError:
+    print(">> Warning: '--free_gpu_mem' is not yet supported when generating image using model based on HuggingFace Diffuser.")
+    pass
 
 try:
     uc, c, extra_conditioning_info = get_uc_and_c_and_ec(
@@ -531,6 +534,7 @@ def process_image(image,seed):
 inpaint_height = inpaint_height,
 inpaint_width = inpaint_width,
 enable_image_debugging = enable_image_debugging,
+free_gpu_mem=self.free_gpu_mem,
 )
 
 if init_color:
2 changes: 2 additions & 0 deletions ldm/invoke/ckpt_generator/base.py
@@ -56,9 +56,11 @@ def generate(self,prompt,init_image,width,height,sampler, iterations=1,seed=None
 image_callback=None, step_callback=None, threshold=0.0, perlin=0.0,
 safety_checker:dict=None,
 attention_maps_callback = None,
+free_gpu_mem: bool=False,
 **kwargs):
 scope = choose_autocast(self.precision)
 self.safety_checker = safety_checker
+self.free_gpu_mem = free_gpu_mem
 attention_maps_images = []
 attention_maps_callback = lambda saver: attention_maps_images.append(saver.get_stacked_maps_image())
 make_image = self.get_make_image(
2 changes: 2 additions & 0 deletions ldm/invoke/generator/base.py
@@ -62,9 +62,11 @@ def set_variation(self, seed, variation_amount, with_variations):
 def generate(self,prompt,init_image,width,height,sampler, iterations=1,seed=None,
 image_callback=None, step_callback=None, threshold=0.0, perlin=0.0,
 safety_checker:dict=None,
+free_gpu_mem: bool=False,
 **kwargs):
 scope = nullcontext
 self.safety_checker = safety_checker
+self.free_gpu_mem = free_gpu_mem
 attention_maps_images = []
 attention_maps_callback = lambda saver: attention_maps_images.append(saver.get_stacked_maps_image())
 make_image = self.get_make_image(
