remove cuda requirement for gmfss
TNTwise committed Dec 7, 2024
1 parent 8fbd3b5 commit 13f7b5c
Showing 3 changed files with 4 additions and 27 deletions.
REAL-Video-Enhancer.py (3 changes: 0 additions & 3 deletions)
@@ -150,14 +150,11 @@ def __init__(self):
         for line in self.fullOutput.lower().split("\n"):
             if "half precision support:" in line:
                 halfPrecisionSupport = "true" in line
-            if "gmfss support:" in line:
-                gmfssSupport = "true" in line
         settings = Settings()
         settings.readSettings()
         self.settings = settings
         self.processTab = ProcessTab(
             parent=self,
-            gmfssSupport=gmfssSupport,
         )
         self.homeTab = HomeTab(parent=self)
         self.downloadTab = DownloadTab(parent=self)
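For context on the hunk above: the GUI learns backend capabilities by scanning the backend process's captured output for lines of the form "<capability>: true/false"; with the GMFSS line dropped, only half-precision support is still parsed this way. A minimal sketch of that parsing pattern, using a hypothetical parse_capability helper that does not exist in the repo (the real code inlines the loop):

```python
# Hypothetical helper illustrating the parsing pattern used in __init__ above;
# the actual code keeps this loop inline rather than calling a function.
def parse_capability(full_output: str, key: str) -> bool:
    for line in full_output.lower().split("\n"):
        if key in line:
            return "true" in line
    return False

# Usage mirroring the remaining check:
# halfPrecisionSupport = parse_capability(self.fullOutput, "half precision support:")
```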
backend/rve-backend.py (15 changes: 2 additions & 13 deletions)
@@ -48,8 +48,6 @@ def __init__(self):
             )
         else:
             half_prec_supp = False
-        gmfss_supp = False
-        vram = 0
         availableBackends = []
         printMSG = ""

@@ -75,18 +73,13 @@ def __init__(self):
             availableBackends.append("pytorch (cuda)")
             printMSG += f"PyTorch Version: {torch.__version__}\n"
             half_prec_supp = check_bfloat16_support()
-            gmfss_supp = checkForGMFSS()
-            vram = get_pytorch_vram()
         if checkForPytorchROCM():
             availableBackends.append("pytorch (rocm)")
             import torch

             printMSG += f"PyTorch Version: {torch.__version__}\n"
             half_prec_supp = check_bfloat16_support()
-            try:
-                vram = get_pytorch_vram()
-            except Exception:
-                vram = 0

         if checkForNCNN():
             availableBackends.append("ncnn")
             printMSG += f"NCNN Version: 20220729\n"
@@ -98,11 +91,7 @@
             printMSG += f"ONNXruntime Version: {ort.__version__}\n"
             half_prec_supp = checkForDirectMLHalfPrecisionSupport()
         printMSG += f"Half precision support: {half_prec_supp}\n"
-        printMSG += f"GMFSS support: {gmfss_supp}\n"
-        if not gmfss_supp and "pytorch (cuda)" in availableBackends:
-            printMSG += "Please install CUDA to enable GMFSS\n"
-        if vram != 0:
-            printMSG += f"VRAM: {vram}mb\n"

         print("Available Backends: " + str(availableBackends))
         print(printMSG)

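The probe helpers referenced in these hunks (check_bfloat16_support, get_pytorch_vram, checkForGMFSS, checkForPytorchROCM, and so on) live in the backend's utility code and are not shown in this diff. As an illustration only, and not the project's actual implementations, similar probes can be written against PyTorch's public CUDA API:

```python
import torch

def check_bfloat16_support() -> bool:
    # bfloat16 is only worth enabling when the visible GPU reports support for it
    return torch.cuda.is_available() and torch.cuda.is_bf16_supported()

def get_pytorch_vram() -> int:
    # Total VRAM of the first visible device, in megabytes; 0 if no GPU is present.
    if not torch.cuda.is_available():
        return 0
    return torch.cuda.get_device_properties(0).total_memory // (1024 * 1024)
```

If the repo's real helpers differ, treat this purely as a sketch of the underlying PyTorch calls.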
src/ui/ProcessTab.py (13 changes: 2 additions & 11 deletions)
@@ -40,15 +40,14 @@


 class ProcessTab:
-    def __init__(self, parent, gmfssSupport: bool):
+    def __init__(self, parent):
         self.parent = parent
         self.imagePreviewSharedMemoryID = "/image_preview" + str(os.getpid())
         self.renderTextOutputList = None
         self.currentFrame = 0
         self.animationHandler = AnimationHandler()
         self.tileUpAnimationHandler = AnimationHandler()
         self.tileDownAnimationHandler = AnimationHandler()
-        self.gmfssSupport = gmfssSupport
         # encoder dict
         # key is the name in RVE gui
         # value is the encoder used
@@ -97,15 +96,7 @@ def populateModels(self, backend) -> dict:
             ["None"] + list(interpolateModels.keys())
         )
         self.parent.upscaleModelComboBox.addItems(["None"] + list(upscaleModels.keys()))
-        if not self.gmfssSupport:
-            # Disable specific options based on the selected text
-            for i in range(self.parent.interpolateModelComboBox.count()):
-                if "GMFSS" in self.parent.interpolateModelComboBox.itemText(
-                    i
-                ):  # hacky solution, just straight copy pasted
-                    self.parent.interpolateModelComboBox.model().item(i).setEnabled(
-                        self.gmfssSupport
-                    )


     def onTilingSwitch(self):
         if self.parent.tilingCheckBox.isChecked():
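The block removed from populateModels greyed out GMFSS entries by disabling the matching items in the interpolation combo box's model; QComboBox is backed by a QStandardItemModel by default, so a disabled item stays visible but cannot be selected. A standalone sketch of that Qt technique, assuming a PySide6 environment and using illustrative model names:

```python
import sys
from PySide6.QtWidgets import QApplication, QComboBox  # assumes PySide6 is installed

app = QApplication(sys.argv)
combo = QComboBox()
combo.addItems(["None", "RIFE", "GMFSS"])  # illustrative entries, not RVE's real model list

# QComboBox uses a QStandardItemModel by default, so individual entries
# can be greyed out by disabling the corresponding QStandardItem.
for i in range(combo.count()):
    if "GMFSS" in combo.itemText(i):
        combo.model().item(i).setEnabled(False)

combo.show()
app.exec()
```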
