Switch some more prints to logging.
comfyanonymous committed Mar 11, 2024
1 parent 0ed72be commit 2a813c3
Showing 10 changed files with 40 additions and 34 deletions.
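
The changes below follow one mechanical pattern: each print(...) becomes a call on Python's root logger at a severity matching the message (logging.info for startup notices, logging.warning for recoverable out-of-memory paths, logging.error for invalid input, logging.debug for developer chatter), with print's comma-separated arguments folded into a single str.format() string. A minimal, hypothetical sketch of how these records then surface at runtime; the basicConfig setup here is an assumption for illustration and is not part of this commit:

import logging

# Assumed setup, for illustration only: route root-logger records to stderr.
# ComfyUI's real logging configuration lives outside this commit.
logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")

steps = 16
# Before: print with comma-separated arguments.
print("out of memory error, increasing steps and trying again", steps)
# After: the same message as a single formatted string at warning severity.
logging.warning("out of memory error, increasing steps and trying again {}".format(steps))
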
15 changes: 8 additions & 7 deletions comfy/ldm/modules/attention.py
@@ -4,6 +4,7 @@
 from torch import nn, einsum
 from einops import rearrange, repeat
 from typing import Optional, Any
+import logging

 from .diffusionmodules.util import checkpoint, AlphaBlender, timestep_embedding
 from .sub_quadratic_attention import efficient_dot_product_attention
@@ -20,7 +21,7 @@

 # CrossAttn precision handling
 if args.dont_upcast_attention:
-    print("disabling upcasting of attention")
+    logging.info("disabling upcasting of attention")
     _ATTN_PRECISION = "fp16"
 else:
     _ATTN_PRECISION = "fp32"
@@ -274,12 +275,12 @@ def attention_split(q, k, v, heads, mask=None):
                 model_management.soft_empty_cache(True)
                 if cleared_cache == False:
                     cleared_cache = True
-                    print("out of memory error, emptying cache and trying again")
+                    logging.warning("out of memory error, emptying cache and trying again")
                     continue
                 steps *= 2
                 if steps > 64:
                     raise e
-                print("out of memory error, increasing steps and trying again", steps)
+                logging.warning("out of memory error, increasing steps and trying again {}".format(steps))
             else:
                 raise e

@@ -351,17 +352,17 @@ def attention_pytorch(q, k, v, heads, mask=None):
 optimized_attention = attention_basic

 if model_management.xformers_enabled():
-    print("Using xformers cross attention")
+    logging.info("Using xformers cross attention")
     optimized_attention = attention_xformers
 elif model_management.pytorch_attention_enabled():
-    print("Using pytorch cross attention")
+    logging.info("Using pytorch cross attention")
     optimized_attention = attention_pytorch
 else:
     if args.use_split_cross_attention:
-        print("Using split optimization for cross attention")
+        logging.info("Using split optimization for cross attention")
         optimized_attention = attention_split
     else:
-        print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention")
+        logging.info("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention")
         optimized_attention = attention_sub_quad

 optimized_attention_masked = optimized_attention
13 changes: 7 additions & 6 deletions comfy/ldm/modules/diffusionmodules/model.py
@@ -5,6 +5,7 @@
 import numpy as np
 from einops import rearrange
 from typing import Optional, Any
+import logging

 from comfy import model_management
 import comfy.ops
@@ -190,7 +191,7 @@ def slice_attention(q, k, v):
             steps *= 2
             if steps > 128:
                 raise e
-            print("out of memory error, increasing steps and trying again", steps)
+            logging.warning("out of memory error, increasing steps and trying again {}".format(steps))

     return r1

@@ -235,7 +236,7 @@ def pytorch_attention(q, k, v):
         out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False)
         out = out.transpose(2, 3).reshape(B, C, H, W)
     except model_management.OOM_EXCEPTION as e:
-        print("scaled_dot_product_attention OOMed: switched to slice attention")
+        logging.warning("scaled_dot_product_attention OOMed: switched to slice attention")
         out = slice_attention(q.view(B, -1, C), k.view(B, -1, C).transpose(1, 2), v.view(B, -1, C).transpose(1, 2)).reshape(B, C, H, W)
     return out

@@ -268,13 +269,13 @@ def __init__(self, in_channels):
                                         padding=0)

         if model_management.xformers_enabled_vae():
-            print("Using xformers attention in VAE")
+            logging.info("Using xformers attention in VAE")
             self.optimized_attention = xformers_attention
         elif model_management.pytorch_attention_enabled():
-            print("Using pytorch attention in VAE")
+            logging.info("Using pytorch attention in VAE")
             self.optimized_attention = pytorch_attention
         else:
-            print("Using split attention in VAE")
+            logging.info("Using split attention in VAE")
             self.optimized_attention = normal_attention

     def forward(self, x):
@@ -562,7 +563,7 @@ def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
         block_in = ch*ch_mult[self.num_resolutions-1]
         curr_res = resolution // 2**(self.num_resolutions-1)
         self.z_shape = (1,z_channels,curr_res,curr_res)
-        print("Working with z of shape {} = {} dimensions.".format(
+        logging.debug("Working with z of shape {} = {} dimensions.".format(
             self.z_shape, np.prod(self.z_shape)))

         # z to block_in
5 changes: 3 additions & 2 deletions comfy/ldm/modules/diffusionmodules/openaimodel.py
@@ -4,6 +4,7 @@
 import torch.nn as nn
 import torch.nn.functional as F
 from einops import rearrange
+import logging

 from .util import (
     checkpoint,
@@ -359,7 +360,7 @@ def apply_control(h, control, name):
             try:
                 h += ctrl
             except:
-                print("warning control could not be applied", h.shape, ctrl.shape)
+                logging.warning("warning control could not be applied {} {}".format(h.shape, ctrl.shape))
     return h

 class UNetModel(nn.Module):
@@ -496,7 +497,7 @@ def __init__(
             if isinstance(self.num_classes, int):
                 self.label_emb = nn.Embedding(num_classes, time_embed_dim, dtype=self.dtype, device=device)
             elif self.num_classes == "continuous":
-                print("setting up linear c_adm embedding layer")
+                logging.debug("setting up linear c_adm embedding layer")
                 self.label_emb = nn.Linear(1, time_embed_dim)
             elif self.num_classes == "sequential":
                 assert adm_in_channels is not None
3 changes: 2 additions & 1 deletion comfy/ldm/modules/sub_quadratic_attention.py
@@ -14,6 +14,7 @@
 from torch import Tensor
 from torch.utils.checkpoint import checkpoint
 import math
+import logging

 try:
     from typing import Optional, NamedTuple, List, Protocol
@@ -170,7 +171,7 @@ def _get_attention_scores_no_kv_chunking(
         attn_probs = attn_scores.softmax(dim=-1)
         del attn_scores
     except model_management.OOM_EXCEPTION:
-        print("ran out of memory while running softmax in _get_attention_scores_no_kv_chunking, trying slower in place softmax instead")
+        logging.warning("ran out of memory while running softmax in _get_attention_scores_no_kv_chunking, trying slower in place softmax instead")
         attn_scores -= attn_scores.max(dim=-1, keepdim=True).values
         torch.exp(attn_scores, out=attn_scores)
         summed = torch.sum(attn_scores, dim=-1, keepdim=True)
3 changes: 2 additions & 1 deletion comfy/samplers.py
@@ -4,6 +4,7 @@
 import collections
 from comfy import model_management
 import math
+import logging

 def get_area_and_mult(conds, x_in, timestep_in):
     area = (x_in.shape[2], x_in.shape[3], 0, 0)
@@ -625,7 +626,7 @@ def calculate_sigmas_scheduler(model, scheduler_name, steps):
     elif scheduler_name == "sgm_uniform":
         sigmas = normal_scheduler(model, steps, sgm=True)
     else:
-        print("error invalid scheduler", scheduler_name)
+        logging.error("error invalid scheduler {}".format(scheduler_name))
     return sigmas

 def sampler_object(name):
6 changes: 3 additions & 3 deletions comfy_extras/nodes_freelunch.py
@@ -1,7 +1,7 @@
 #code originally taken from: https://github.com/ChenyangSi/FreeU (under MIT License)

 import torch
-
+import logging

 def Fourier_filter(x, threshold, scale):
     # FFT
@@ -49,7 +49,7 @@ def output_block_patch(h, hsp, transformer_options):
                     try:
                         hsp = Fourier_filter(hsp, threshold=1, scale=scale[1])
                     except:
-                        print("Device", hsp.device, "does not support the torch.fft functions used in the FreeU node, switching to CPU.")
+                        logging.warning("Device {} does not support the torch.fft functions used in the FreeU node, switching to CPU.".format(hsp.device))
                         on_cpu_devices[hsp.device] = True
                         hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device)
                 else:
@@ -95,7 +95,7 @@ def output_block_patch(h, hsp, transformer_options):
                     try:
                         hsp = Fourier_filter(hsp, threshold=1, scale=scale[1])
                     except:
-                        print("Device", hsp.device, "does not support the torch.fft functions used in the FreeU node, switching to CPU.")
+                        logging.warning("Device {} does not support the torch.fft functions used in the FreeU node, switching to CPU.".format(hsp.device))
                         on_cpu_devices[hsp.device] = True
                         hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device)
                 else:
3 changes: 2 additions & 1 deletion comfy_extras/nodes_hypernetwork.py
@@ -1,6 +1,7 @@
 import comfy.utils
 import folder_paths
 import torch
+import logging

 def load_hypernetwork_patch(path, strength):
     sd = comfy.utils.load_torch_file(path, safe_load=True)
@@ -23,7 +24,7 @@ def load_hypernetwork_patch(path, strength):
     }

     if activation_func not in valid_activation:
-        print("Unsupported Hypernetwork format, if you report it I might implement it.", path, " ", activation_func, is_layer_norm, use_dropout, activate_output, last_layer_dropout)
+        logging.error("Unsupported Hypernetwork format, if you report it I might implement it. {} {} {} {} {} {}".format(path, activation_func, is_layer_norm, use_dropout, activate_output, last_layer_dropout))
         return None

     out = {}
18 changes: 9 additions & 9 deletions main.py
@@ -54,15 +54,15 @@ def execute_script(script_path):
 import gc

 from comfy.cli_args import args
+import logging

 if os.name == "nt":
-    import logging
     logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())

 if __name__ == "__main__":
     if args.cuda_device is not None:
         os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device)
-        print("Set cuda device to:", args.cuda_device)
+        logging.info("Set cuda device to: {}".format(args.cuda_device))

     if args.deterministic:
         if 'CUBLAS_WORKSPACE_CONFIG' not in os.environ:
@@ -88,7 +88,7 @@ def cuda_malloc_warning():
             if b in device_name:
                 cuda_malloc_warning = True
     if cuda_malloc_warning:
-        print("\nWARNING: this card most likely does not support cuda-malloc, if you get \"CUDA error\" please run ComfyUI with: --disable-cuda-malloc\n")
+        logging.warning("\nWARNING: this card most likely does not support cuda-malloc, if you get \"CUDA error\" please run ComfyUI with: --disable-cuda-malloc\n")

 def prompt_worker(q, server):
     e = execution.PromptExecutor(server)
@@ -121,7 +121,7 @@ def prompt_worker(q, server):

             current_time = time.perf_counter()
             execution_time = current_time - execution_start_time
-            print("Prompt executed in {:.2f} seconds".format(execution_time))
+            logging.info("Prompt executed in {:.2f} seconds".format(execution_time))

         flags = q.get_flags()
         free_memory = flags.get("free_memory", False)
@@ -182,14 +182,14 @@ def load_extra_path_config(yaml_path):
                 full_path = y
                 if base_path is not None:
                     full_path = os.path.join(base_path, full_path)
-                print("Adding extra search path", x, full_path)
+                logging.info("Adding extra search path {} {}".format(x, full_path))
                 folder_paths.add_model_folder_path(x, full_path)


 if __name__ == "__main__":
     if args.temp_directory:
         temp_dir = os.path.join(os.path.abspath(args.temp_directory), "temp")
-        print(f"Setting temp directory to: {temp_dir}")
+        logging.info(f"Setting temp directory to: {temp_dir}")
         folder_paths.set_temp_directory(temp_dir)
     cleanup_temp()

@@ -224,7 +224,7 @@ def load_extra_path_config(yaml_path):

     if args.output_directory:
         output_dir = os.path.abspath(args.output_directory)
-        print(f"Setting output directory to: {output_dir}")
+        logging.info(f"Setting output directory to: {output_dir}")
         folder_paths.set_output_directory(output_dir)

     #These are the default folders that checkpoints, clip and vae models will be saved to when using CheckpointSave, etc.. nodes
@@ -234,7 +234,7 @@

     if args.input_directory:
         input_dir = os.path.abspath(args.input_directory)
-        print(f"Setting input directory to: {input_dir}")
+        logging.info(f"Setting input directory to: {input_dir}")
         folder_paths.set_input_directory(input_dir)

     if args.quick_test_for_ci:
@@ -252,6 +252,6 @@ def startup_server(address, port):
     try:
         loop.run_until_complete(run(server, address=args.listen, port=args.port, verbose=not args.dont_print_server, call_on_start=call_on_start))
     except KeyboardInterrupt:
-        print("\nStopped server")
+        logging.info("\nStopped server")

     cleanup_temp()
2 changes: 1 addition & 1 deletion nodes.py
@@ -1904,7 +1904,7 @@ def load_custom_node(module_path, ignore=set()):
             return False
     except Exception as e:
         logging.warning(traceback.format_exc())
-        logging.warning(f"Cannot import {module_path} module for custom nodes:", e)
+        logging.warning(f"Cannot import {module_path} module for custom nodes: {e}")
         return False

 def load_custom_nodes():
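
The nodes.py hunk above fixes a latent bug along with the style: the old call handed the exception to logging.warning as a positional argument after an already-formatted f-string. logging treats positional arguments as lazy %-style format args, and with no % placeholder in the message the record fails to format, so the exception text never reached the log. A standalone sketch of the difference, assuming a plain basicConfig setup (not ComfyUI code):

import logging

logging.basicConfig(level=logging.WARNING)

module_path, e = "custom_nodes/broken_node", RuntimeError("boom")

# Old form: the f-string is already fully formatted, so the trailing `e` is
# interpreted as a %-style argument with nowhere to go; logging reports an
# internal "--- Logging error ---" and the intended message is lost.
logging.warning(f"Cannot import {module_path} module for custom nodes:", e)

# New form: interpolate the exception into the message itself.
logging.warning(f"Cannot import {module_path} module for custom nodes: {e}")
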
6 changes: 3 additions & 3 deletions server.py
@@ -413,8 +413,8 @@ async def get_object_info(request):
         try:
             out[x] = node_info(x)
         except Exception as e:
-            print(f"[ERROR] An error occurred while retrieving information for the '{x}' node.", file=sys.stderr)
-            traceback.print_exc()
+            logging.error(f"[ERROR] An error occurred while retrieving information for the '{x}' node.")
+            logging.error(traceback.format_exc())
     return web.json_response(out)

 @routes.get("/object_info/{node_class}")
@@ -641,6 +641,6 @@ def trigger_on_prompt(self, json_data):
                 json_data = handler(json_data)
             except Exception as e:
                 logging.warning(f"[ERROR] An error occurred during the on_prompt_handler processing")
-                traceback.print_exc()
+                logging.warning(traceback.format_exc())

         return json_data
