Change all is_inside to run_inside (#475)
* Change all is_inside to run_inside

* use images not assigned to the stub in some cases
erikbern authored Nov 8, 2023
1 parent 28f970e commit 3771625
Showing 6 changed files with 16 additions and 20 deletions.
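
In short, module-level code that previously ran behind a truth check now runs inside an image-scoped context manager, and images are plain variables instead of stub attributes. A minimal sketch of the before/after shape (not copied verbatim from any one file below; the stub name and package are placeholders):

import modal

image = modal.Image.debian_slim().pip_install("numpy")
stub = modal.Stub("example", image=image)

# Old pattern: a module-level check that is only True when running in the cloud.
# if stub.is_inside():
#     import numpy as np

# New pattern: a context manager tied to a specific image, so the imports
# only execute where that image (and its packages) is actually available.
with stub.image.run_inside():
    import numpy as np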
7 changes: 3 additions & 4 deletions 02_building_containers/import_sklearn.py
@@ -18,12 +18,11 @@
     .pip_install("scikit-learn"),
 )
 
-# The `stub.is_inside()` lets us conditionally run code in the global scope.
+# The `stub.image.run_inside()` context manager lets us conditionally run code in the global scope.
 # This is needed because we might not have sklearn and numpy installed locally,
-# but we know they are installed inside the custom image. `stub.is_inside()`
-# will return `False` when we run this locally, but `True` when it runs in the cloud.
+# but we know they are installed inside the custom image.
 
-if stub.is_inside():
+with stub.image.run_inside():
     import numpy as np
     from sklearn import datasets, linear_model
 
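Read in full, the file above follows roughly this shape (a hedged sketch: the stub name and the `fit_model` body are illustrative, not copied from the repository; only the image definition and the `run_inside` block mirror the hunk):

import modal

stub = modal.Stub(
    "example-import-sklearn",
    image=modal.Image.debian_slim().pip_install("scikit-learn"),
)

# sklearn and numpy may be missing locally but are installed in the image,
# so the imports live inside the image's run_inside() context manager.
with stub.image.run_inside():
    import numpy as np
    from sklearn import datasets, linear_model


@stub.function()
def fit_model() -> float:
    # Illustrative body: fit a small linear regression on a bundled dataset.
    X, y = datasets.load_diabetes(return_X_y=True)
    model = linear_model.LinearRegression()
    return float(model.fit(X, y).score(X, y))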
2 changes: 1 addition & 1 deletion 03_scaling_out/youtube_face_detection.py
@@ -52,7 +52,7 @@
 )
 stub = modal.Stub("example-youtube-face-detection", image=image)
 
-if stub.is_inside():
+with stub.image.run_inside():
     import cv2
     import moviepy.editor
     import pytube
5 changes: 2 additions & 3 deletions 06_gpu_and_ml/blender/blender_video.py
@@ -66,10 +66,9 @@
 #
 # We need various global configuration that we want to happen inside the containers (but not locally), such as
 # enabling the GPU device.
-# To do this, we use the `stub.is_inside()` conditional, which will evaluate to `False` when the script runs
-# locally, but to `True` when imported in the cloud.
+# To do this, we use the `stub.image.run_inside()` context manager.
 
-if stub.is_inside():
+with stub.image.run_inside():
     import bpy
 
     # NOTE: Blender segfaults if you try to do this after the other imports.
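Concretely, the container-only setup now reads roughly as follows (a sketch; the Cycles device configuration shown is an assumption about the rest of the file, not part of this hunk):

import modal

stub = modal.Stub("example-blender-video")  # image definition elided in this sketch

with stub.image.run_inside():
    import bpy

    # Container-only configuration, e.g. pointing Cycles at the GPU device.
    bpy.context.preferences.addons["cycles"].preferences.compute_device_type = "CUDA"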
2 changes: 1 addition & 1 deletion 06_gpu_and_ml/stable_lm/main.py
@@ -215,7 +215,7 @@ def format_prompt(instruction: str) -> str:
     return f"<|USER|>{instruction}<|ASSISTANT|>"
 
 
-if stub.is_inside():
+with stub.image.run_inside():
    import uuid
 
    import msgspec
6 changes: 3 additions & 3 deletions 07_web_endpoints/chatbot_spa.py
@@ -40,15 +40,15 @@ def load_tokenizer_and_model():
     return tokenizer, model
 
 
-stub.gpu_image = (
+gpu_image = (
     Image.debian_slim()
     .pip_install("torch", find_links="https://download.pytorch.org/whl/cu116")
     .pip_install("transformers~=4.31", "accelerate")
     .run_function(load_tokenizer_and_model)
 )
 
 
-if stub.is_inside(stub.gpu_image):
+with gpu_image.run_inside():
     import torch
 
     tokenizer, model = load_tokenizer_and_model()
@@ -72,7 +72,7 @@ def chat(body: dict = fastapi.Body(...)):
     return app
 
 
-@stub.function(gpu="any", image=stub.gpu_image)
+@stub.function(gpu="any", image=gpu_image)
 def generate_response(
     message: str, id: Optional[str] = None
 ) -> Tuple[str, str]:
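This hunk also shows the second half of the commit message in action: the image is now a plain module-level variable rather than an attribute assigned onto the stub, and run_inside() is called on the image object directly instead of passing the image to stub.is_inside(). A condensed sketch of the resulting shape (model loading and the response logic are elided and hypothetical):

import modal

# The image is an ordinary variable now, not `stub.gpu_image`.
gpu_image = (
    modal.Image.debian_slim()
    .pip_install("torch", find_links="https://download.pytorch.org/whl/cu116")
    .pip_install("transformers~=4.31", "accelerate")
)

stub = modal.Stub("example-chatbot-spa")

with gpu_image.run_inside():
    import torch


@stub.function(gpu="any", image=gpu_image)
def generate_response(message: str) -> str:
    ...  # hypothetical: run the model on `message` and return the reply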
14 changes: 6 additions & 8 deletions misc/news_summarizer.py
@@ -44,20 +44,20 @@ def fetch_model(local_files_only: bool = False):
     return model, tokenizer
 
 
-stub["deep_learning_image"] = (
+deep_learning_image = (
     modal.Image.debian_slim()
     .pip_install("transformers==4.16.2", "torch", "sentencepiece")
     .run_function(fetch_model)
 )
 
 # Defining the scraping image is very similar. This image only contains the packages required
 # to scrape the New York Times website, though; so it's much smaller.
-stub["scraping_image"] = modal.Image.debian_slim().pip_install(
+scraping_image = modal.Image.debian_slim().pip_install(
     "requests", "beautifulsoup4", "lxml"
 )
 
 
-if stub.is_inside(stub["scraping_image"]):
+with scraping_image.run_inside():
     import requests
     from bs4 import BeautifulSoup
 
@@ -83,9 +83,7 @@ class NYArticle:
 # Create an environment variable called `NYTIMES_API_KEY` with your API key.
 
 
-@stub.function(
-    secret=modal.Secret.from_name("nytimes"), image=stub["scraping_image"]
-)
+@stub.function(secret=modal.Secret.from_name("nytimes"), image=scraping_image)
 def latest_science_stories(n_stories: int = 5) -> List[NYArticle]:
     # query api for latest science articles
     params = {
@@ -120,7 +118,7 @@ def latest_science_stories(n_stories: int = 5) -> List[NYArticle]:
 # [Beautiful Soup](https://www.crummy.com/software/BeautifulSoup/bs4/doc/) for that.
 
 
-@stub.function(image=stub["scraping_image"])
+@stub.function(image=scraping_image)
 def scrape_nyc_article(url: str) -> str:
     print(f"Scraping article => {url}")
 
@@ -150,7 +148,7 @@ def scrape_nyc_article(url: str) -> str:
 
 
 @stub.function(
-    image=stub["deep_learning_image"],
+    image=deep_learning_image,
     gpu=False,
     memory=4096,
 )
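The same two changes repeat here, with one image per concern. A compressed sketch of how the pieces fit together after the change (function bodies and the summarizer function's name are illustrative, not from the repository):

import modal

scraping_image = modal.Image.debian_slim().pip_install(
    "requests", "beautifulsoup4", "lxml"
)
deep_learning_image = modal.Image.debian_slim().pip_install(
    "transformers==4.16.2", "torch", "sentencepiece"
)

stub = modal.Stub("example-news-summarizer")

# Each image gets its own run_inside() block for the imports it provides.
with scraping_image.run_inside():
    import requests
    from bs4 import BeautifulSoup


@stub.function(secret=modal.Secret.from_name("nytimes"), image=scraping_image)
def latest_science_stories(n_stories: int = 5) -> list:
    ...  # illustrative: query the NYT API and return article metadata


@stub.function(image=deep_learning_image, gpu=False, memory=4096)
def summarize(text: str) -> str:
    ...  # illustrative: summarize the article text with a transformers model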
