Mounts on images #995

Merged: 22 commits, Dec 17, 2024
13 changes: 6 additions & 7 deletions 06_gpu_and_ml/comfyui/comfyapp.py
@@ -72,6 +72,11 @@
.run_commands( # use comfy-cli to install the ComfyUI repo and its dependencies
"comfy --skip-prompt install --nvidia"
)
.add_local_file(
Path(__file__).parent / "workflow_api.json",
"/root/workflow_api.json",
copy=True,
)
)
# ### Downloading custom nodes
# We'll use `comfy-cli` to download custom nodes, in this case the popular WAS Node Suite pack.
@@ -86,7 +91,7 @@
# ### Downloading models

# You can also use comfy-cli to download models, but for this example we'll download the Flux models directly from Hugging Face into a Modal Volume.
# Then on container start, we'll mount our models into the ComfyUI models directory.
# Then on container start, we'll mount our models Volume into the ComfyUI models directory.
# This allows us to avoid re-downloading the models every time you rebuild your image.

image = (
@@ -182,12 +187,6 @@ def ui():
allow_concurrent_inputs=10,
container_idle_timeout=300,
gpu="A10G",
mounts=[
modal.Mount.from_local_file(
Path(__file__).parent / "workflow_api.json",
"/root/workflow_api.json",
),
],
volumes={"/root/comfy/ComfyUI/models": vol},
)
class ComfyUI:
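The pattern this file's hunks apply (attaching local files to the Image itself instead of passing a `mounts=` list to the Function) can be sketched roughly as follows. The base image, app name, and function are illustrative stand-ins, not taken from this diff; per Modal's API, `copy=True` bakes the file into the image at build time so later build steps can read it, while omitting it attaches the file when the container starts.

from pathlib import Path

import modal

app = modal.App("example-image-files")  # hypothetical app name

image = (
    modal.Image.debian_slim()  # illustrative base; the real example layers comfy-cli on top
    .add_local_file(
        Path(__file__).parent / "workflow_api.json",
        "/root/workflow_api.json",
        copy=True,  # copy at build time rather than attaching at container start
    )
)


@app.function(image=image, gpu="A10G")
def run_workflow():
    # the file is available in the container without any mounts= argument
    print(open("/root/workflow_api.json").read())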
9 changes: 6 additions & 3 deletions 06_gpu_and_ml/dreambooth/diffusers_lora_finetune.py
@@ -421,14 +421,17 @@ class AppConfig(SharedConfig):
guidance_scale: float = 6


assets_path = Path(__file__).parent / "assets"
image_with_assets = image.add_local_dir(
# Add local web assets to the image
Path(__file__).parent / "assets",
remote_path="/assets",
)


@app.function(
image=image,
image=image_with_assets,
concurrency_limit=1,
allow_concurrent_inputs=1000,
mounts=[modal.Mount.from_local_dir(assets_path, remote_path="/assets")],
)
@modal.asgi_app()
def fastapi_app():
21 changes: 8 additions & 13 deletions 06_gpu_and_ml/hyperparameter-sweep/hp_sweep_gpt.py
@@ -49,7 +49,7 @@
import logging as L
import urllib.request
from dataclasses import dataclass
from pathlib import Path
from pathlib import Path, PosixPath

import modal
from pydantic import BaseModel
@@ -74,7 +74,7 @@
volume = modal.Volume.from_name(
"example-hp-sweep-gpt-volume", create_if_missing=True
)
volume_path = Path("/vol/data")
volume_path = PosixPath("/vol/data")
model_filename = "nano_gpt_model.pt"
best_model_filename = "best_nano_gpt_model.pt"
tb_log_path = volume_path / "tb_logs"
@@ -92,24 +92,22 @@
"torch==2.1.2",
"tensorboard==2.17.1",
"numpy<2",
)
).add_local_dir(Path(__file__).parent / "src", remote_path="/root/src")

# We also have some local dependencies that we'll need to import into the remote environment.
# We mount them onto the remote container.

mounts = [
modal.Mount.from_local_dir(
Path(__file__).parent / "src", remote_path=Path("/root/src")
)
]

# We'll serve a simple web endpoint
web_image = base_image.pip_install(
"fastapi[standard]==0.115.4", "starlette==0.41.2"
)

# And we'll deploy a web UI for interacting with our trained models using Gradio.
ui_image = web_image.pip_install("gradio~=4.44.0")
assets_path = Path(__file__).parent / "assets"
ui_image = web_image.pip_install("gradio~=4.44.0").add_local_dir(
assets_path, remote_path="/assets"
)


# We can also "pre-import" libraries that will be used by the functions we run on Modal in a given image
# using the `with image.imports` context manager.
@@ -139,7 +137,6 @@

@app.function(
image=torch_image,
mounts=mounts,
volumes={volume_path: volume},
gpu=gpu,
timeout=1 * HOURS,
@@ -575,15 +572,13 @@ def web_generate(request: GenerationRequest):
# The Gradio UI will look something like this:

# ![Image of Gradio Web App. Top shows model selection dropdown. Left side shows input prompt textbox. Right side shows SLM generated output. Bottom has button for starting generation process](./gradio.png)
assets_path = Path(__file__).parent / "assets"


@app.function(
image=ui_image,
concurrency_limit=1,
volumes={volume_path: volume},
allow_concurrent_inputs=1000,
mounts=[modal.Mount.from_local_dir(assets_path, remote_path="/assets")],
)
@modal.asgi_app()
def ui():
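The hyperparameter-sweep changes above follow the same idea for local Python dependencies: the `src` directory is attached to the image with `add_local_dir` rather than through a `Mount` on each Function, and the Volume mount point switches from `Path` to `PosixPath`, presumably so the remote path stays POSIX-style even when the script is deployed from Windows. A rough, self-contained sketch of the src-directory pattern (the `src.dataset` module and `load_data` function are hypothetical stand-ins for the example's real helpers):

from pathlib import Path

import modal

app = modal.App("example-local-src")  # hypothetical app name

image = (
    modal.Image.debian_slim(python_version="3.11")
    .pip_install("numpy<2")
    # ship ./src into the image under /root/src, where the example's own
    # remote imports expect to find it
    .add_local_dir(Path(__file__).parent / "src", remote_path="/root/src")
)


@app.function(image=image)
def train():
    from src.dataset import load_data  # hypothetical local module packaged above

    return len(load_data())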
5 changes: 3 additions & 2 deletions 06_gpu_and_ml/obj_detection_webcam/webcam.py
@@ -168,8 +168,9 @@ def detect(self, img_data_in):


@app.function(
image=modal.Image.debian_slim().pip_install("fastapi[standard]"),
mounts=[modal.Mount.from_local_dir(static_path, remote_path="/assets")],
image=modal.Image.debian_slim()
.pip_install("fastapi[standard]")
.add_local_dir(static_path, remote_path="/assets")
)
@modal.asgi_app(label="example-webcam-object-detection")
def fastapi_app():
8 changes: 2 additions & 6 deletions 06_gpu_and_ml/openai_whisper/pod_transcriber/app/main.py
@@ -93,9 +93,7 @@ def populate_podcast_metadata(podcast_id: str):


@app.function(
mounts=[
modal.Mount.from_local_dir(config.ASSETS_PATH, remote_path="/assets")
],
image=app_image.add_local_dir(config.ASSETS_PATH, remote_path="/assets"),
network_file_systems={config.CACHE_DIR: volume},
keep_warm=2,
)
@@ -112,9 +110,7 @@ def fastapi_app():
return web_app


@app.function(
image=app_image,
)
@app.function()
def search_podcast(name):
from gql import gql

7 changes: 4 additions & 3 deletions 06_gpu_and_ml/stable_diffusion/text_to_image.py
@@ -221,14 +221,15 @@ def entrypoint(

frontend_path = Path(__file__).parent / "frontend"

web_image = modal.Image.debian_slim(python_version="3.12").pip_install(
"jinja2==3.1.4", "fastapi[standard]==0.115.4"
web_image = (
modal.Image.debian_slim(python_version="3.12")
.pip_install("jinja2==3.1.4", "fastapi[standard]==0.115.4")
.add_local_dir(frontend_path, remote_path="/assets")
)


@app.function(
image=web_image,
mounts=[modal.Mount.from_local_dir(frontend_path, remote_path="/assets")],
allow_concurrent_inputs=1000,
)
@modal.asgi_app()
1 change: 1 addition & 0 deletions 06_gpu_and_ml/text-to-pokemon/text_to_pokemon/config.py
@@ -206,5 +206,6 @@ def null_safety(images, **kwargs):
"fastapi[standard]",
)
.run_function(load_stable_diffusion_pokemon_model)
.add_local_dir(local_path=ASSETS_PATH, remote_path="/assets")
)
app = modal.App(name="example-text-to-pokemon", image=image)
8 changes: 1 addition & 7 deletions 06_gpu_and_ml/text-to-pokemon/text_to_pokemon/main.py
@@ -129,13 +129,7 @@ def diskcached_text_to_pokemon(prompt: str) -> list[bytes]:
return samples_data


@app.function(
mounts=[
modal.Mount.from_local_dir(
local_path=config.ASSETS_PATH, remote_path="/assets"
)
],
)
@app.function()
@modal.asgi_app()
def fastapi_app():
import fastapi.staticfiles
11 changes: 4 additions & 7 deletions 07_web_endpoints/fasthtml-checkboxes/fasthtml_checkboxes.py
@@ -30,17 +30,14 @@
db = modal.Dict.from_name("example-checkboxes-db", create_if_missing=True)

css_path_local = Path(__file__).parent / "styles.css"
css_path_remote = Path("/assets/styles.css")
css_path_remote = "/assets/styles.css"


@app.function(
image=modal.Image.debian_slim(python_version="3.12").pip_install(
"python-fasthtml==0.6.9", "inflect~=7.4.0"
),
image=modal.Image.debian_slim(python_version="3.12")
.pip_install("python-fasthtml==0.6.9", "inflect~=7.4.0")
.add_local_file(css_path_local, remote_path=css_path_remote),
concurrency_limit=1, # we currently maintain state in memory, so we restrict the server to one worker
mounts=[
modal.Mount.from_local_file(css_path_local, remote_path=css_path_remote)
],
allow_concurrent_inputs=1000,
)
@modal.asgi_app()
27 changes: 15 additions & 12 deletions 09_job_queues/doc_ocr_webapp.py
@@ -75,28 +75,31 @@ async def poll_results(call_id: str):
return result


# Finally, we mount the static files for our front-end. We've made [a simple React
# Specify a container image containing the version of fastapi we want to use to serve
# our web app
fast_api_image = modal.Image.debian_slim(python_version="3.12").pip_install(
"fastapi[standard]==0.115.4"
)

# Finally, we add the static files for our front-end. We've made [a simple React
# app](https://github.com/modal-labs/modal-examples/tree/main/09_job_queues/doc_ocr_frontend)
# that hits the two endpoints defined above. To package these files with our app, first
# we get the local assets path, and then create a modal [`Mount`](https://modal.com/docs/guide/local-data#mounting-directories)
# that mounts this directory at `/assets` inside our container. Then, we instruct FastAPI to [serve
# that hits the two endpoints defined above. To package these files with our app, we use
# add_local_dir with the local directory of the assets, and specify that we want them
# in the `/assets` directory inside our container (the `remote_path`). Then, we instruct FastAPI to [serve
# this static file directory](https://fastapi.tiangolo.com/tutorial/static-files/) at our root path.

assets_path = Path(__file__).parent / "doc_ocr_frontend"
local_assets_path = Path(__file__).parent / "doc_ocr_frontend"
ocr_app_image = fast_api_image.add_local_dir(
local_assets_path, remote_path="/assets"
)


@app.function(
image=modal.Image.debian_slim(python_version="3.12").pip_install(
"fastapi[standard]==0.115.4"
),
mounts=[modal.Mount.from_local_dir(assets_path, remote_path="/assets")],
)
@app.function(image=ocr_app_image)
@modal.asgi_app()
def wrapper():
web_app.mount(
"/", fastapi.staticfiles.StaticFiles(directory="/assets", html=True)
)

return web_app


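Assembled without the diff noise, the packaging-plus-serving pattern described in the comment above reads roughly like the sketch below. It is condensed: in the real example, `web_app` and its OCR endpoints are defined earlier in the file.

from pathlib import Path

import fastapi
import fastapi.staticfiles
import modal

app = modal.App("example-doc-ocr-webapp")
web_app = fastapi.FastAPI()

fast_api_image = modal.Image.debian_slim(python_version="3.12").pip_install(
    "fastapi[standard]==0.115.4"
)

# the built front-end travels inside the image at /assets
ocr_app_image = fast_api_image.add_local_dir(
    Path(__file__).parent / "doc_ocr_frontend", remote_path="/assets"
)


@app.function(image=ocr_app_image)
@modal.asgi_app()
def wrapper():
    # serve the packaged static files at the root path
    web_app.mount(
        "/", fastapi.staticfiles.StaticFiles(directory="/assets", html=True)
    )
    return web_app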
39 changes: 16 additions & 23 deletions 10_integrations/dbt/dbt_duckdb.py
@@ -35,6 +35,12 @@
PROJ_PATH = "/root/dbt" # remote paths
PROFILES_PATH = "/root/dbt_profile"
TARGET_PATH = "/root/target"
# Most of the DBT code and configuration is taken directly from the classic
# [Jaffle Shop](https://github.com/dbt-labs/jaffle_shop) demo and modified to support
# using `dbt-duckdb` with an S3 bucket.

# The DBT `profiles.yml` configuration is taken from
# [the `dbt-duckdb` docs](https://github.com/jwills/dbt-duckdb#configuring-your-profile).

# We also define the environment our application will run in --
# a container image, as in Docker.
@@ -56,27 +62,17 @@
"DBT_TARGET_PATH": TARGET_PATH,
}
)
# Here we add all local code and configuration into the Modal Image
# so that it will be available when we run DBT on Modal.
.add_local_dir(LOCAL_DBT_PROJECT, remote_path=PROJ_PATH)
.add_local_file(
LOCAL_DBT_PROJECT / "profiles.yml",
remote_path=f"{PROFILES_PATH}/profiles.yml",
)
)

app = modal.App(name="example-dbt-duckdb-s3", image=dbt_image)

# Most of the DBT code and configuration is taken directly from the classic
# [Jaffle Shop](https://github.com/dbt-labs/jaffle_shop) demo and modified to support
# using `dbt-duckdb` with an S3 bucket.

# The DBT `profiles.yml` configuration is taken from
# [the `dbt-duckdb` docs](https://github.com/jwills/dbt-duckdb#configuring-your-profile).

# Here we mount all this local code and configuration into the Modal Function
# so that it will be available when we run DBT on Modal.

dbt_project = modal.Mount.from_local_dir(
LOCAL_DBT_PROJECT, remote_path=PROJ_PATH
)
dbt_profiles = modal.Mount.from_local_file(
local_path=LOCAL_DBT_PROJECT / "profiles.yml",
remote_path=Path(PROFILES_PATH, "profiles.yml"),
)
dbt_target = modal.Volume.from_name("dbt-target-vol", create_if_missing=True)

# We'll also need to authenticate with AWS to store data in S3.
@@ -133,7 +129,6 @@


@app.function(
mounts=[dbt_project],
secrets=[s3_secret],
)
def create_source_data():
@@ -168,15 +163,14 @@ def create_source_data():
# And DBT is a Python tool, so it's easy to run DBT with Modal:
# below, we import the `dbt` library's `dbtRunner` to pass commands from our
# Python code, running on Modal, the same way we'd pass commands on a command line.

# Note that this Modal Function has access to our AWS Secret,
# the `mount`ed local files with our DBT project and profiles,
#
# Note that this Modal Function has access to our AWS S3 Secret,
# the local files associated with our DBT project and profiles,
# and a remote Modal Volume that acts as a distributed file system.


@app.function(
secrets=[s3_secret],
mounts=[dbt_project, dbt_profiles],
volumes={TARGET_PATH: dbt_target},
)
def run(command: str) -> None:
@@ -279,7 +273,6 @@ def serve_dbt_docs():
@app.function(
schedule=modal.Period(days=1),
secrets=[s3_secret],
mounts=[dbt_project, dbt_profiles],
volumes={TARGET_PATH: dbt_target},
)
def daily_build() -> None:
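For context on the DBT Functions above, the `dbtRunner` invocation they wrap looks roughly like the sketch below. The Secret, Volume, and image-level `DBT_*` environment variables are the ones defined in this diff; the function body itself (command parsing and error handling) is illustrative.

@app.function(
    secrets=[s3_secret],
    volumes={TARGET_PATH: dbt_target},
)
def run(command: str) -> None:
    # imported remotely, where dbt-core and dbt-duckdb are installed in dbt_image
    from dbt.cli.main import dbtRunner

    # pass the same strings you'd type after `dbt` on the command line,
    # e.g. "run" or "build"; project, profiles, and target locations come
    # from the DBT_* environment variables set on dbt_image
    result = dbtRunner().invoke(command.split())
    if result.exception is not None:
        raise result.exception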