From aeb71ab6c26d708302799fb64273e3e9672c7c5b Mon Sep 17 00:00:00 2001
From: Charles Frye
Date: Tue, 3 Dec 2024 20:02:27 -0800
Subject: [PATCH] remove deployment from apps that don't need it (#1001)

* remove deployment from apps that don't need it

* adds back in the deployment of 100k cbxes, used by load-testing example

* adds back deployment of trtllm_llama, used by dbt example
---
 06_gpu_and_ml/comfyui/comfyapp.py                           | 1 -
 06_gpu_and_ml/controlnet/controlnet_gradio_demos.py         | 1 -
 06_gpu_and_ml/hyperparameter-sweep/hp_sweep_gpt.py          | 1 -
 06_gpu_and_ml/llm-serving/chat_with_pdf_vision.py           | 4 ----
 06_gpu_and_ml/llm-serving/sgl_vlm.py                        | 1 -
 06_gpu_and_ml/llm-serving/vllm_inference.py                 | 1 -
 06_gpu_and_ml/obj_detection_webcam/webcam.py                | 2 +-
 06_gpu_and_ml/stable_diffusion/text_to_image.py             | 1 -
 07_web_endpoints/count_faces.py                             | 1 -
 07_web_endpoints/fasthtml-checkboxes/fasthtml_checkboxes.py | 2 +-
 07_web_endpoints/fasthtml_app.py                            | 1 -
 10_integrations/streamlit/serve_streamlit.py                | 2 +-
 12 files changed, 3 insertions(+), 15 deletions(-)

diff --git a/06_gpu_and_ml/comfyui/comfyapp.py b/06_gpu_and_ml/comfyui/comfyapp.py
index 41ddc98da..999c17bfa 100644
--- a/06_gpu_and_ml/comfyui/comfyapp.py
+++ b/06_gpu_and_ml/comfyui/comfyapp.py
@@ -1,6 +1,5 @@
 # ---
 # cmd: ["modal", "serve", "06_gpu_and_ml/comfyui/comfyapp.py"]
-# deploy: true
 # ---
 #
 # # Run Flux on ComfyUI interactively and as an API
diff --git a/06_gpu_and_ml/controlnet/controlnet_gradio_demos.py b/06_gpu_and_ml/controlnet/controlnet_gradio_demos.py
index fdfc55007..cef2483c1 100644
--- a/06_gpu_and_ml/controlnet/controlnet_gradio_demos.py
+++ b/06_gpu_and_ml/controlnet/controlnet_gradio_demos.py
@@ -1,6 +1,5 @@
 # ---
 # cmd: ["modal", "serve", "06_gpu_and_ml/controlnet/controlnet_gradio_demos.py"]
-# deploy: false
 # tags: ["use-case-image-video-3d", "featured"]
 # ---
 #
diff --git a/06_gpu_and_ml/hyperparameter-sweep/hp_sweep_gpt.py b/06_gpu_and_ml/hyperparameter-sweep/hp_sweep_gpt.py
index 80a34c1f2..e70387d7c 100644
--- a/06_gpu_and_ml/hyperparameter-sweep/hp_sweep_gpt.py
+++ b/06_gpu_and_ml/hyperparameter-sweep/hp_sweep_gpt.py
@@ -1,5 +1,4 @@
 # ---
-# deploy: true
 # cmd: ["modal", "run", "06_gpu_and_ml/hyperparameter-sweep/hp_sweep_gpt.py", "--n-steps", "200", "--n-steps-before-checkpoint", "50", "--n-steps-before-eval", "50"]
 # ---
 
diff --git a/06_gpu_and_ml/llm-serving/chat_with_pdf_vision.py b/06_gpu_and_ml/llm-serving/chat_with_pdf_vision.py
index 8096824c3..f3a75f9a7 100644
--- a/06_gpu_and_ml/llm-serving/chat_with_pdf_vision.py
+++ b/06_gpu_and_ml/llm-serving/chat_with_pdf_vision.py
@@ -1,7 +1,3 @@
-# ---
-# deploy: true
-# ---
-
 # # Chat with PDF: RAG with ColQwen2
 
 # In this example, we demonstrate how to use the the [ColQwen2](https://huggingface.co/vidore/colqwen2-v0.1) model to build a simple
diff --git a/06_gpu_and_ml/llm-serving/sgl_vlm.py b/06_gpu_and_ml/llm-serving/sgl_vlm.py
index 048e77ac1..cf686a73c 100644
--- a/06_gpu_and_ml/llm-serving/sgl_vlm.py
+++ b/06_gpu_and_ml/llm-serving/sgl_vlm.py
@@ -1,5 +1,4 @@
 # ---
-# deploy: true
 # tags: ["use-case-lm-inference", "use-case-image-video-3d"]
 # ---
 # # Run LLaVA-Next on SGLang for Visual QA
diff --git a/06_gpu_and_ml/llm-serving/vllm_inference.py b/06_gpu_and_ml/llm-serving/vllm_inference.py
index c3b4ebe08..884f7297f 100644
--- a/06_gpu_and_ml/llm-serving/vllm_inference.py
+++ b/06_gpu_and_ml/llm-serving/vllm_inference.py
@@ -1,5 +1,4 @@
 # ---
-# deploy: true
 # cmd: ["modal", "serve", "06_gpu_and_ml/llm-serving/vllm_inference.py"]
 # pytest: false
 # tags: ["use-case-lm-inference", "featured"]
diff --git a/06_gpu_and_ml/obj_detection_webcam/webcam.py b/06_gpu_and_ml/obj_detection_webcam/webcam.py
index b2296570a..df7697d85 100644
--- a/06_gpu_and_ml/obj_detection_webcam/webcam.py
+++ b/06_gpu_and_ml/obj_detection_webcam/webcam.py
@@ -16,7 +16,7 @@
 #
 # ## Live demo
 #
-# [Take a look at the deployed app](https://modal-labs-example-webcam-object-detection-fastapi-app.modal.run/).
+# [Take a look at the deployed app](https://modal-labs--example-webcam-object-detection-fastapi-app.modal.run/).
 #
 # A couple of caveats:
 # * This is not optimized for latency: every prediction takes about 1s, and
diff --git a/06_gpu_and_ml/stable_diffusion/text_to_image.py b/06_gpu_and_ml/stable_diffusion/text_to_image.py
index 56c906b1e..ce384860f 100644
--- a/06_gpu_and_ml/stable_diffusion/text_to_image.py
+++ b/06_gpu_and_ml/stable_diffusion/text_to_image.py
@@ -2,7 +2,6 @@
 # output-directory: "/tmp/stable-diffusion"
 # args: ["--prompt", "A 1600s oil painting of the New York City skyline"]
 # tags: ["use-case-image-video-3d"]
-# deploy: true
 # ---
 
 # # Run Stable Diffusion 3.5 Large Turbo as a CLI, API, and web UI
diff --git a/07_web_endpoints/count_faces.py b/07_web_endpoints/count_faces.py
index 3fc5a06a8..0cb25fe33 100644
--- a/07_web_endpoints/count_faces.py
+++ b/07_web_endpoints/count_faces.py
@@ -1,5 +1,4 @@
 # ---
-# deploy: true
 # cmd: ["modal", "serve", "07_web_endpoints/count_faces.py"]
 # ---
 
diff --git a/07_web_endpoints/fasthtml-checkboxes/fasthtml_checkboxes.py b/07_web_endpoints/fasthtml-checkboxes/fasthtml_checkboxes.py
index f3d2b773b..d9758c128 100644
--- a/07_web_endpoints/fasthtml-checkboxes/fasthtml_checkboxes.py
+++ b/07_web_endpoints/fasthtml-checkboxes/fasthtml_checkboxes.py
@@ -1,6 +1,6 @@
 # ---
-# deploy: true
 # cmd: ["modal", "serve", "07_web_endpoints.fasthtml-checkboxes.fasthtml_checkboxes"]
+# deploy: true
 # mypy: ignore-errors
 # ---
 
diff --git a/07_web_endpoints/fasthtml_app.py b/07_web_endpoints/fasthtml_app.py
index 8db612e79..f4b93fefb 100644
--- a/07_web_endpoints/fasthtml_app.py
+++ b/07_web_endpoints/fasthtml_app.py
@@ -1,5 +1,4 @@
 # ---
-# deploy: true
 # cmd: ["modal", "serve", "07_web_endpoints/fasthtml_app.py"]
 # ---
 
diff --git a/10_integrations/streamlit/serve_streamlit.py b/10_integrations/streamlit/serve_streamlit.py
index 37544a789..08e3ce6fe 100644
--- a/10_integrations/streamlit/serve_streamlit.py
+++ b/10_integrations/streamlit/serve_streamlit.py
@@ -82,5 +82,5 @@ def run():
 # modal deploy serve_streamlit.py
 # ```
 #
-# If successful, this will print a URL for your app, that you can navigate to from
+# If successful, this will print a URL for your app that you can navigate to from
 # your browser 🎉 .