From dce3555339a0f8883757bc44c358f4d0248e7b1e Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sat, 2 Mar 2024 17:16:31 -0500
Subject: [PATCH] Add some tesla pascal GPUs to the fp16 working but slower
 list.

---
 comfy/model_management.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index adcc0e8ace2..9f9248831ba 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -753,7 +753,7 @@ def should_use_fp16(device=None, model_params=0, prioritize_performance=True, ma
     #FP16 is confirmed working on a 1080 (GP104) but it's a bit slower than FP32 so it should only be enabled
     #when the model doesn't actually fit on the card
     #TODO: actually test if GP106 and others have the same type of behavior
-    nvidia_10_series = ["1080", "1070", "titan x", "p3000", "p3200", "p4000", "p4200", "p5000", "p5200", "p6000", "1060", "1050"]
+    nvidia_10_series = ["1080", "1070", "titan x", "p3000", "p3200", "p4000", "p4200", "p5000", "p5200", "p6000", "1060", "1050", "p40", "p100", "p6", "p4"]
     for x in nvidia_10_series:
         if x in props.name.lower():
             fp16_works = True