run_inside -> imports #525

Merged Dec 16, 2023 · 1 commit

4 changes: 2 additions & 2 deletions 02_building_containers/import_sklearn.py
@@ -18,11 +18,11 @@
     .pip_install("scikit-learn"),
 )
 
-# The `stub.image.run_inside()` lets us conditionally run code in the global scope.
+# The `stub.image.imports()` lets us conditionally import in the global scope.
 # This is needed because we might not have sklearn and numpy installed locally,
 # but we know they are installed inside the custom image.
 
-with stub.image.run_inside():
+with stub.image.imports():
     import numpy as np
     from sklearn import datasets, linear_model

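For reference, a minimal sketch of the renamed context manager, assuming a Modal client from around this release; the app name and the numpy usage are illustrative, not from the example:

import modal

stub = modal.Stub("imports-sketch")
image = modal.Image.debian_slim().pip_install("numpy")

# A failed import inside the block is suppressed locally, so the script
# still loads on a host without numpy; inside a container built from
# `image`, the import succeeds as usual.
with image.imports():
    import numpy as np


@stub.function(image=image)
def column_sums(rows: list[list[float]]) -> list[float]:
    return np.asarray(rows).sum(axis=0).tolist()
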
10 changes: 4 additions & 6 deletions 03_scaling_out/youtube_face_detection.py
@@ -52,14 +52,14 @@
 )
 stub = modal.Stub("example-youtube-face-detection", image=image)
 
-with stub.image.run_inside():
+with image.imports():
     import cv2
     import moviepy.editor
     import pytube
 
 # For temporary storage and sharing of downloaded movie clips, we use a network file system.
 
-stub.net_file_system = modal.NetworkFileSystem.new()
+net_file_system = modal.NetworkFileSystem.new()
 
 # ### Face detection function
 #
@@ -73,9 +73,7 @@
 # and stores the resulting video back to the shared storage.
 
 
-@stub.function(
-    network_file_systems={"/clips": stub.net_file_system}, timeout=600
-)
+@stub.function(network_file_systems={"/clips": net_file_system}, timeout=600)
 def detect_faces(fn, start, stop):
     # Extract the subclip from the video
     clip = moviepy.editor.VideoFileClip(fn).subclip(start, stop)
@@ -108,7 +106,7 @@ def detect_faces(fn, start, stop):
 # 3. Stitch the results back into a new video
 
 
-@stub.function(network_file_systems={"/clips": stub.net_file_system}, retries=1)
+@stub.function(network_file_systems={"/clips": net_file_system}, retries=1)
 def process_video(url):
     print(f"Downloading video from '{url}'")
     yt = pytube.YouTube(url)
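
This diff also stops hanging the network file system off the stub and passes a plain module-level object instead; a sketch of the resulting pattern, with the mount path taken from the example and the function bodies illustrative:

import modal

stub = modal.Stub("nfs-sketch")
net_file_system = modal.NetworkFileSystem.new()


@stub.function(network_file_systems={"/clips": net_file_system})
def write_clip(name: str, payload: bytes):
    # Files written under the mount path are visible to every function
    # that mounts the same network file system.
    with open(f"/clips/{name}", "wb") as f:
        f.write(payload)


@stub.function(network_file_systems={"/clips": net_file_system})
def read_clip(name: str) -> bytes:
    with open(f"/clips/{name}", "rb") as f:
        return f.read()
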
31 changes: 15 additions & 16 deletions 06_gpu_and_ml/embeddings/text_embeddings_inference.py
@@ -1,3 +1,6 @@
+import json
+import os
+import socket
 import subprocess
 from pathlib import Path

@@ -23,8 +26,6 @@


 def spawn_server() -> subprocess.Popen:
-    import socket
-
     process = subprocess.Popen(["text-embeddings-router"] + LAUNCH_FLAGS)
 
     # Poll until webserver at 127.0.0.1:8000 accepts connections before running inputs.
@@ -64,8 +65,9 @@ def download_model():
)


-with tei_image.run_inside():
+with tei_image.imports():
     import numpy as np
+    from httpx import AsyncClient


@stub.cls(
Expand All @@ -79,8 +81,6 @@ def download_model():
)
class TextEmbeddingsInference:
def __enter__(self):
from httpx import AsyncClient

self.process = spawn_server()
self.client = AsyncClient(base_url="http://127.0.0.1:8000")

@@ -101,12 +101,6 @@ async def embed(self, inputs_with_ids: list[tuple[int, str]]):


 def download_data():
-    import json
-    import os
-
-    from google.cloud import bigquery
-    from google.oauth2 import service_account
-
     service_account_info = json.loads(os.environ["SERVICE_ACCOUNT_JSON"])
     credentials = service_account.Credentials.from_service_account_info(
         service_account_info
@@ -131,16 +125,21 @@ def download_data():
     volume.commit()
 
 
+image = Image.debian_slim().pip_install(
+    "google-cloud-bigquery", "pandas", "db-dtypes", "tqdm"
+)
+
+with image.imports():
+    from google.cloud import bigquery
+    from google.oauth2 import service_account
+
+
 @stub.function(
-    image=Image.debian_slim().pip_install(
-        "google-cloud-bigquery", "pandas", "db-dtypes", "tqdm"
-    ),
+    image=image,
     secrets=[Secret.from_name("bigquery")],
     volumes={DATA_PATH.parent: volume},
 )
 def embed_dataset():
-    import json
-
     model = TextEmbeddingsInference()
 
     if not DATA_PATH.exists():
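
Beyond the rename, this file's refactor hoists function-local imports up to module scope, with each image guarding its own dependency set. A condensed sketch of that layout; the /embed payload shape follows the example above, while app and function names are illustrative:

import modal

stub = modal.Stub("two-image-sketch")
tei_image = modal.Image.debian_slim().pip_install("httpx", "numpy")
bq_image = modal.Image.debian_slim().pip_install("google-cloud-bigquery")

# Each guarded block only has to succeed where its own image is used,
# so the two dependency sets never need to coexist in one environment.
with tei_image.imports():
    import numpy as np
    from httpx import AsyncClient

with bq_image.imports():
    from google.cloud import bigquery


@stub.function(image=tei_image)
async def embed(texts: list[str]) -> list:
    async with AsyncClient(base_url="http://127.0.0.1:8000") as client:
        resp = await client.post("/embed", json={"inputs": texts})
    return np.array(resp.json()).tolist()


@stub.function(image=bq_image, secrets=[modal.Secret.from_name("bigquery")])
def row_count(table: str) -> int:
    return bigquery.Client().get_table(table).num_rows
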
2 changes: 1 addition & 1 deletion 07_web_endpoints/chatbot_spa.py
@@ -48,7 +48,7 @@ def load_tokenizer_and_model():
 )
 
 
-with gpu_image.run_inside():
+with gpu_image.imports():
     import torch
 
     tokenizer, model = load_tokenizer_and_model()
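
As this last file shows, the guarded block can carry setup code as well as imports: when the import fails locally, the suppressed ImportError ends the block there, so the setup below it only runs where the package is installed. A minimal sketch, assuming that suppression behavior; the device probe is illustrative, not from the example:

import modal

stub = modal.Stub("guarded-setup-sketch")
gpu_image = modal.Image.debian_slim().pip_install("torch")

with gpu_image.imports():
    import torch

    # Skipped locally when `import torch` raises, along with the import.
    device = "cuda" if torch.cuda.is_available() else "cpu"


@stub.function(image=gpu_image, gpu="any")
def which_device() -> str:
    return device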