diff --git a/02_building_containers/import_sklearn.py b/02_building_containers/import_sklearn.py
index 57991cc43..276111a01 100644
--- a/02_building_containers/import_sklearn.py
+++ b/02_building_containers/import_sklearn.py
@@ -18,12 +18,11 @@
     .pip_install("scikit-learn"),
 )
 
-# The `stub.is_inside()` lets us conditionally run code in the global scope.
+# The `stub.image.run_inside()` context manager lets us conditionally run code in the global scope.
 # This is needed because we might not have sklearn and numpy installed locally,
-# but we know they are installed inside the custom image. `stub.is_inside()`
-# will return `False` when we run this locally, but `True` when it runs in the cloud.
+# but we know they are installed inside the custom image.
 
-if stub.is_inside():
+with stub.image.run_inside():
     import numpy as np
     from sklearn import datasets, linear_model
 
diff --git a/03_scaling_out/youtube_face_detection.py b/03_scaling_out/youtube_face_detection.py
index f88d6f7eb..eba25abf0 100644
--- a/03_scaling_out/youtube_face_detection.py
+++ b/03_scaling_out/youtube_face_detection.py
@@ -52,7 +52,7 @@
 )
 stub = modal.Stub("example-youtube-face-detection", image=image)
 
-if stub.is_inside():
+with stub.image.run_inside():
     import cv2
     import moviepy.editor
     import pytube
diff --git a/06_gpu_and_ml/blender/blender_video.py b/06_gpu_and_ml/blender/blender_video.py
index 5b390849f..d19fc0869 100644
--- a/06_gpu_and_ml/blender/blender_video.py
+++ b/06_gpu_and_ml/blender/blender_video.py
@@ -66,10 +66,9 @@
 #
 # We need various global configuration that we want to happen inside the containers (but not locally), such as
 # enabling the GPU device.
-# To do this, we use the `stub.is_inside()` conditional, which will evaluate to `False` when the script runs
-# locally, but to `True` when imported in the cloud.
+# To do this, we use the `stub.image.run_inside()` context manager.
 
-if stub.is_inside():
+with stub.image.run_inside():
     import bpy  # NOTE: Blender segfaults if you try to do this after the other imports.
diff --git a/06_gpu_and_ml/stable_lm/main.py b/06_gpu_and_ml/stable_lm/main.py
index 1d5ce438d..549934848 100644
--- a/06_gpu_and_ml/stable_lm/main.py
+++ b/06_gpu_and_ml/stable_lm/main.py
@@ -215,7 +215,7 @@ def format_prompt(instruction: str) -> str:
     return f"<|USER|>{instruction}<|ASSISTANT|>"
 
 
-if stub.is_inside():
+with stub.image.run_inside():
     import uuid
 
     import msgspec
diff --git a/07_web_endpoints/chatbot_spa.py b/07_web_endpoints/chatbot_spa.py
index f4666e4dc..af6de1a0f 100644
--- a/07_web_endpoints/chatbot_spa.py
+++ b/07_web_endpoints/chatbot_spa.py
@@ -40,7 +40,7 @@ def load_tokenizer_and_model():
     return tokenizer, model
 
 
-stub.gpu_image = (
+gpu_image = (
     Image.debian_slim()
     .pip_install("torch", find_links="https://download.pytorch.org/whl/cu116")
     .pip_install("transformers~=4.31", "accelerate")
@@ -48,7 +48,7 @@ def load_tokenizer_and_model():
 )
 
 
-if stub.is_inside(stub.gpu_image):
+with gpu_image.run_inside():
     import torch
 
     tokenizer, model = load_tokenizer_and_model()
@@ -72,7 +72,7 @@ def chat(body: dict = fastapi.Body(...)):
     return app
 
 
-@stub.function(gpu="any", image=stub.gpu_image)
+@stub.function(gpu="any", image=gpu_image)
 def generate_response(
     message: str, id: Optional[str] = None
 ) -> Tuple[str, str]:
diff --git a/misc/news_summarizer.py b/misc/news_summarizer.py
index 4503936c2..0d54f8afe 100644
--- a/misc/news_summarizer.py
+++ b/misc/news_summarizer.py
@@ -44,7 +44,7 @@ def fetch_model(local_files_only: bool = False):
     return model, tokenizer
 
 
-stub["deep_learning_image"] = (
+deep_learning_image = (
     modal.Image.debian_slim()
     .pip_install("transformers==4.16.2", "torch", "sentencepiece")
     .run_function(fetch_model)
@@ -52,12 +52,12 @@ def fetch_model(local_files_only: bool = False):
 )
 
 # Defining the scraping image is very similar. This image only contains the packages required
 # to scrape the New York Times website, though; so it's much smaller.
 
-stub["scraping_image"] = modal.Image.debian_slim().pip_install(
+scraping_image = modal.Image.debian_slim().pip_install(
     "requests", "beautifulsoup4", "lxml"
 )
 
-if stub.is_inside(stub["scraping_image"]):
+with scraping_image.run_inside():
     import requests
     from bs4 import BeautifulSoup
@@ -83,9 +83,7 @@ class NYArticle:
 # Create an environment variable called `NYTIMES_API_KEY` with your API key.
 
 
-@stub.function(
-    secret=modal.Secret.from_name("nytimes"), image=stub["scraping_image"]
-)
+@stub.function(secret=modal.Secret.from_name("nytimes"), image=scraping_image)
 def latest_science_stories(n_stories: int = 5) -> List[NYArticle]:
     # query api for latest science articles
     params = {
@@ -120,7 +118,7 @@ def latest_science_stories(n_stories: int = 5) -> List[NYArticle]:
 # [Beautiful Soup](https://www.crummy.com/software/BeautifulSoup/bs4/doc/) for that.
 
 
-@stub.function(image=stub["scraping_image"])
+@stub.function(image=scraping_image)
 def scrape_nyc_article(url: str) -> str:
     print(f"Scraping article => {url}")
 
@@ -150,7 +148,7 @@ def scrape_nyc_article(url: str) -> str:
 
 
 @stub.function(
-    image=stub["deep_learning_image"],
+    image=deep_learning_image,
     gpu=False,
     memory=4096,
 )
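
For reference, a minimal sketch of the pattern these hunks migrate to, with `Image.run_inside()` replacing `stub.is_inside()`. It assumes the same Modal API the patch targets; the stub name, image, package, and function below are illustrative and not taken from any of the patched files.

import modal

stub = modal.Stub("example-run-inside")
image = modal.Image.debian_slim().pip_install("numpy")

# The import only needs to succeed inside containers built from `image`;
# importing this module locally still works even if numpy is not installed.
with image.run_inside():
    import numpy as np


@stub.function(image=image)
def column_sums() -> list:
    # numpy is available here because the function runs in the container.
    return np.arange(12).reshape(3, 4).sum(axis=0).tolist()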