diff --git a/06_gpu_and_ml/diffusers/train_and_serve_diffusers_script.py b/06_gpu_and_ml/diffusers/train_and_serve_diffusers_script.py
index 97f305cd2..fec3df6e5 100644
--- a/06_gpu_and_ml/diffusers/train_and_serve_diffusers_script.py
+++ b/06_gpu_and_ml/diffusers/train_and_serve_diffusers_script.py
@@ -110,7 +110,7 @@
 # import huggingface_hub
 #
 # login to huggingface
-# hf_key = os.environ["HUGGINGFACE_TOKEN"]
+# hf_key = os.environ["HF_TOKEN"]
 # huggingface_hub.login(hf_key)

 # dataset = load_dataset("imagefolder", data_dir="/lg_white_bg_heroicon_png_img", split="train")
@@ -285,7 +285,7 @@ def train():
     write_basic_config(mixed_precision="fp16")

     # authenticate to hugging face so we can download the model weights
-    hf_key = os.environ["HUGGINGFACE_TOKEN"]
+    hf_key = os.environ["HF_TOKEN"]
     huggingface_hub.login(hf_key)

     # check whether we can access the model repo
diff --git a/06_gpu_and_ml/embeddings/text_embeddings_inference.py b/06_gpu_and_ml/embeddings/text_embeddings_inference.py
index a88fc78a2..fdb5c8c2a 100644
--- a/06_gpu_and_ml/embeddings/text_embeddings_inference.py
+++ b/06_gpu_and_ml/embeddings/text_embeddings_inference.py
@@ -33,7 +33,7 @@ def spawn_server() -> subprocess.Popen:
         ["text-embeddings-router"] + LAUNCH_FLAGS,
         env={
             **os.environ,
-            "HUGGING_FACE_HUB_TOKEN": os.environ["HUGGINGFACE_TOKEN"],
+            "HUGGING_FACE_HUB_TOKEN": os.environ["HF_TOKEN"],
         },
     )

diff --git a/06_gpu_and_ml/openai_whisper/finetuning/train/__main__.py b/06_gpu_and_ml/openai_whisper/finetuning/train/__main__.py
index d9424115e..ee0a9b58a 100644
--- a/06_gpu_and_ml/openai_whisper/finetuning/train/__main__.py
+++ b/06_gpu_and_ml/openai_whisper/finetuning/train/__main__.py
@@ -171,13 +171,13 @@ def __call__(
         "mozilla-foundation/common_voice_11_0",
         "hi",
         split="train+validation",
-        use_auth_token=os.environ["HUGGINGFACE_TOKEN"],
+        use_auth_token=os.environ["HF_TOKEN"],
     )
     raw_datasets["eval"] = load_dataset(
         "mozilla-foundation/common_voice_11_0",
         "hi",
         split="test",
-        use_auth_token=os.environ["HUGGINGFACE_TOKEN"],
+        use_auth_token=os.environ["HF_TOKEN"],
     )

     # Most ASR datasets only provide input audio samples (audio) and
@@ -211,7 +211,7 @@ def __call__(
         else model_args.model_name_or_path,
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
-        use_auth_token=os.environ["HUGGINGFACE_TOKEN"],
+        use_auth_token=os.environ["HF_TOKEN"],
     )

     config.update(
diff --git a/06_gpu_and_ml/stable_diffusion/stable_diffusion_aitemplate.py b/06_gpu_and_ml/stable_diffusion/stable_diffusion_aitemplate.py
index f5cc166cb..96b66add2 100644
--- a/06_gpu_and_ml/stable_diffusion/stable_diffusion_aitemplate.py
+++ b/06_gpu_and_ml/stable_diffusion/stable_diffusion_aitemplate.py
@@ -58,7 +58,7 @@ def download_and_compile():
         MODEL_ID,
         revision="fp16",
         torch_dtype=torch.float16,
-        use_auth_token=os.environ["HUGGINGFACE_TOKEN"],
+        use_auth_token=os.environ["HF_TOKEN"],
     ).save_pretrained(MODEL_PATH, safe_serialization=True)

     diffusers.EulerDiscreteScheduler.from_pretrained(
diff --git a/06_gpu_and_ml/text_generation_inference.py b/06_gpu_and_ml/text_generation_inference.py
index 1c9f950e1..5ed75799a 100644
--- a/06_gpu_and_ml/text_generation_inference.py
+++ b/06_gpu_and_ml/text_generation_inference.py
@@ -61,7 +61,7 @@ def download_model():
         ],
         env={
             **os.environ,
-            "HUGGING_FACE_HUB_TOKEN": os.environ["HUGGINGFACE_TOKEN"],
+            "HUGGING_FACE_HUB_TOKEN": os.environ["HF_TOKEN"],
         },
         check=True,
     )
@@ -73,7 +73,7 @@
 #
 # Next we run the download step to pre-populate the image with our model weights.
 #
-# For this step to work on a gated model such as LLaMA 2, the HUGGINGFACE_TOKEN environment
+# For this step to work on a gated model such as LLaMA 2, the HF_TOKEN environment
 # variable must be set ([reference](https://github.com/huggingface/text-generation-inference#using-a-private-or-gated-model)).
 #
 # After [creating a HuggingFace access token](https://huggingface.co/settings/tokens),
@@ -130,7 +130,7 @@ def __enter__(self):
         ["text-generation-launcher"] + LAUNCH_FLAGS,
         env={
             **os.environ,
-            "HUGGING_FACE_HUB_TOKEN": os.environ["HUGGINGFACE_TOKEN"],
+            "HUGGING_FACE_HUB_TOKEN": os.environ["HF_TOKEN"],
         },
     )
     self.client = AsyncClient("http://127.0.0.1:8000", timeout=60)
diff --git a/06_gpu_and_ml/vllm_inference.py b/06_gpu_and_ml/vllm_inference.py
index e3151f714..bdc40716d 100644
--- a/06_gpu_and_ml/vllm_inference.py
+++ b/06_gpu_and_ml/vllm_inference.py
@@ -35,7 +35,7 @@
 # ### Download the weights
 # Make sure you have created a [HuggingFace access token](https://huggingface.co/settings/tokens).
 # To access the token in a Modal function, we can create a secret on the [secrets page](https://modal.com/secrets).
-# Now the token will be available via the environment variable named `HUGGINGFACE_TOKEN`. Functions that inject this secret will have access to the environment variable.
+# Now the token will be available via the environment variable named `HF_TOKEN`. Functions that inject this secret will have access to the environment variable.
 #
 # We can download the model to a particular directory using the HuggingFace utility function `snapshot_download`.
 #
@@ -49,7 +49,7 @@ def download_model_to_folder():
     snapshot_download(
         BASE_MODEL,
         local_dir=MODEL_DIR,
-        token=os.environ["HUGGINGFACE_TOKEN"],
+        token=os.environ["HF_TOKEN"],
     )
     move_cache()

diff --git a/10_integrations/pyjulia.py b/10_integrations/pyjulia.py
index 8d9135372..b8a9d6841 100644
--- a/10_integrations/pyjulia.py
+++ b/10_integrations/pyjulia.py
@@ -3,13 +3,13 @@
 stub = modal.Stub("example-pyjulia")
 stub.image = (
     modal.Image.debian_slim()
-    # Install Julia 1.7
+    # Install Julia 1.10
     .apt_install("wget", "ca-certificates")
     .run_commands(
-        "wget -nv https://julialang-s3.julialang.org/bin/linux/x64/1.7/julia-1.7.2-linux-x86_64.tar.gz",
-        "tar -xf julia-1.7.2-linux-x86_64.tar.gz",
-        "cp -r julia-1.7.2 /opt/",
-        "ln -s /opt/julia-1.7.2/bin/julia /usr/local/bin/julia",
+        "wget -nv https://julialang-s3.julialang.org/bin/linux/x64/1.10/julia-1.10.0-linux-x86_64.tar.gz",
+        "tar -xf julia-1.10.0-linux-x86_64.tar.gz",
+        "cp -r julia-1.10.0 /opt/",
+        "ln -s /opt/julia-1.10.0/bin/julia /usr/local/bin/julia",
     )
     # Install PyJulia bindings
     .pip_install("julia")
diff --git a/10_integrations/stable_diffusion_slackbot.py b/10_integrations/stable_diffusion_slackbot.py
index c76f9681e..e9447a633 100644
--- a/10_integrations/stable_diffusion_slackbot.py
+++ b/10_integrations/stable_diffusion_slackbot.py
@@ -35,7 +35,7 @@
 # Next, [create a HuggingFace access token](https://huggingface.co/settings/tokens).
 # To access the token in a Modal function, we can create a secret on the
 # [secrets page](https://modal.com/secrets). Let's use the environment variable
-# named `HUGGINGFACE_TOKEN`. Functions that inject this secret will have access
+# named `HF_TOKEN`. Functions that inject this secret will have access
 # to the environment variable.
 #
 # ![create a huggingface token](./huggingface_token.png)
@@ -57,7 +57,7 @@ def fetch_model(local_files_only: bool = False):
     return StableDiffusionPipeline.from_pretrained(
         "runwayml/stable-diffusion-v1-5",
-        use_auth_token=os.environ["HUGGINGFACE_TOKEN"],
+        use_auth_token=os.environ["HF_TOKEN"],
         variant="fp16",
         torch_dtype=float16,
         device_map="auto",
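
Below the diff: a minimal sketch of how code consumes the renamed variable from a Modal function. It assumes a Modal secret named "huggingface-secret" that sets HF_TOKEN; the secret name, the stub name, and the whoami helper are illustrative and not taken from the diff.

import os

import modal

stub = modal.Stub("hf-token-demo")  # illustrative name, not from the diff


@stub.function(secret=modal.Secret.from_name("huggingface-secret"))
def whoami():
    import huggingface_hub

    # The attached secret injects HF_TOKEN into the container environment,
    # matching the variable name this diff standardizes on.
    huggingface_hub.login(os.environ["HF_TOKEN"])
    print(huggingface_hub.whoami())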