Change HUGGINGFACE_TOKEN to HF_TOKEN #539

Merged: 1 commit, Jan 3, 2024
4 changes: 2 additions & 2 deletions 06_gpu_and_ml/diffusers/train_and_serve_diffusers_script.py
@@ -110,7 +110,7 @@
 # import huggingface_hub

 # # login to huggingface
-# hf_key = os.environ["HUGGINGFACE_TOKEN"]
+# hf_key = os.environ["HF_TOKEN"]
 # huggingface_hub.login(hf_key)

 # dataset = load_dataset("imagefolder", data_dir="/lg_white_bg_heroicon_png_img", split="train")
@@ -285,7 +285,7 @@ def train():
     write_basic_config(mixed_precision="fp16")

     # authenticate to hugging face so we can download the model weights
-    hf_key = os.environ["HUGGINGFACE_TOKEN"]
+    hf_key = os.environ["HF_TOKEN"]
     huggingface_hub.login(hf_key)

     # check whether we can access the model repo
2 changes: 1 addition & 1 deletion 06_gpu_and_ml/embeddings/text_embeddings_inference.py
@@ -33,7 +33,7 @@ def spawn_server() -> subprocess.Popen:
         ["text-embeddings-router"] + LAUNCH_FLAGS,
         env={
             **os.environ,
-            "HUGGING_FACE_HUB_TOKEN": os.environ["HUGGINGFACE_TOKEN"],
+            "HUGGING_FACE_HUB_TOKEN": os.environ["HF_TOKEN"],
         },
     )

6 changes: 3 additions & 3 deletions 06_gpu_and_ml/openai_whisper/finetuning/train/__main__.py
@@ -171,13 +171,13 @@ def __call__(
         "mozilla-foundation/common_voice_11_0",
         "hi",
         split="train+validation",
-        use_auth_token=os.environ["HUGGINGFACE_TOKEN"],
+        use_auth_token=os.environ["HF_TOKEN"],
     )
     raw_datasets["eval"] = load_dataset(
         "mozilla-foundation/common_voice_11_0",
         "hi",
         split="test",
-        use_auth_token=os.environ["HUGGINGFACE_TOKEN"],
+        use_auth_token=os.environ["HF_TOKEN"],
     )

     # Most ASR datasets only provide input audio samples (audio) and
@@ -211,7 +211,7 @@ def __call__(
         else model_args.model_name_or_path,
         cache_dir=model_args.cache_dir,
         revision=model_args.model_revision,
-        use_auth_token=os.environ["HUGGINGFACE_TOKEN"],
+        use_auth_token=os.environ["HF_TOKEN"],
     )

     config.update(
@@ -58,7 +58,7 @@ def download_and_compile():
         MODEL_ID,
         revision="fp16",
         torch_dtype=torch.float16,
-        use_auth_token=os.environ["HUGGINGFACE_TOKEN"],
+        use_auth_token=os.environ["HF_TOKEN"],
     ).save_pretrained(MODEL_PATH, safe_serialization=True)

     diffusers.EulerDiscreteScheduler.from_pretrained(
6 changes: 3 additions & 3 deletions 06_gpu_and_ml/text_generation_inference.py
@@ -61,7 +61,7 @@ def download_model():
         ],
         env={
             **os.environ,
-            "HUGGING_FACE_HUB_TOKEN": os.environ["HUGGINGFACE_TOKEN"],
+            "HUGGING_FACE_HUB_TOKEN": os.environ["HF_TOKEN"],
         },
         check=True,
     )
@@ -73,7 +73,7 @@ def download_model():
 #
 # Next we run the download step to pre-populate the image with our model weights.
 #
-# For this step to work on a gated model such as LLaMA 2, the HUGGINGFACE_TOKEN environment
+# For this step to work on a gated model such as LLaMA 2, the HF_TOKEN environment
 # variable must be set ([reference](https://github.com/huggingface/text-generation-inference#using-a-private-or-gated-model)).
 #
 # After [creating a HuggingFace access token](https://huggingface.co/settings/tokens),
@@ -130,7 +130,7 @@ def __enter__(self):
         ["text-generation-launcher"] + LAUNCH_FLAGS,
         env={
             **os.environ,
-            "HUGGING_FACE_HUB_TOKEN": os.environ["HUGGINGFACE_TOKEN"],
+            "HUGGING_FACE_HUB_TOKEN": os.environ["HF_TOKEN"],
         },
     )
     self.client = AsyncClient("http://127.0.0.1:8000", timeout=60)
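
The diffs above all read HF_TOKEN from the environment; on Modal that variable is typically injected by attaching a secret to the function. A minimal sketch of the wiring, assuming a secret named "huggingface" holding an HF_TOKEN key has already been created on the secrets page (the secret and stub names are illustrative, not taken from this PR):

import os

import modal

stub = modal.Stub("hf-token-example")


# Attaching the secret exposes its keys as environment variables
# inside the container that runs this function.
@stub.function(secrets=[modal.Secret.from_name("huggingface")])
def check_token():
    # Read the renamed variable the same way the examples above do.
    hf_key = os.environ["HF_TOKEN"]
    print("HF_TOKEN is set:", bool(hf_key))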
4 changes: 2 additions & 2 deletions 06_gpu_and_ml/vllm_inference.py
@@ -35,7 +35,7 @@
 # ### Download the weights
 # Make sure you have created a [HuggingFace access token](https://huggingface.co/settings/tokens).
 # To access the token in a Modal function, we can create a secret on the [secrets page](https://modal.com/secrets).
-# Now the token will be available via the environment variable named `HUGGINGFACE_TOKEN`. Functions that inject this secret will have access to the environment variable.
+# Now the token will be available via the environment variable named `HF_TOKEN`. Functions that inject this secret will have access to the environment variable.
 #
 # We can download the model to a particular directory using the HuggingFace utility function `snapshot_download`.
 #
@@ -49,7 +49,7 @@ def download_model_to_folder():
     snapshot_download(
         BASE_MODEL,
         local_dir=MODEL_DIR,
-        token=os.environ["HUGGINGFACE_TOKEN"],
+        token=os.environ["HF_TOKEN"],
     )
     move_cache()

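
Before launching a long weights download, it can be worth checking that the token in HF_TOKEN actually authenticates. A small sketch using huggingface_hub's whoami helper (this check is an addition for illustration, not part of the PR):

import os

from huggingface_hub import whoami

# Raises an error if the token is missing or invalid;
# otherwise returns account metadata for the token's owner.
info = whoami(token=os.environ["HF_TOKEN"])
print("Authenticated to HuggingFace as:", info["name"])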
10 changes: 5 additions & 5 deletions 10_integrations/pyjulia.py
@@ -3,13 +3,13 @@
 stub = modal.Stub("example-pyjulia")
 stub.image = (
     modal.Image.debian_slim()
-    # Install Julia 1.7
+    # Install Julia 1.10
     .apt_install("wget", "ca-certificates")
     .run_commands(
-        "wget -nv https://julialang-s3.julialang.org/bin/linux/x64/1.7/julia-1.7.2-linux-x86_64.tar.gz",
-        "tar -xf julia-1.7.2-linux-x86_64.tar.gz",
-        "cp -r julia-1.7.2 /opt/",
-        "ln -s /opt/julia-1.7.2/bin/julia /usr/local/bin/julia",
+        "wget -nv https://julialang-s3.julialang.org/bin/linux/x64/1.10/julia-1.10.0-linux-x86_64.tar.gz",
+        "tar -xf julia-1.10.0-linux-x86_64.tar.gz",
+        "cp -r julia-1.10.0 /opt/",
+        "ln -s /opt/julia-1.10.0/bin/julia /usr/local/bin/julia",
     )
     # Install PyJulia bindings
     .pip_install("julia")
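
With Julia 1.10 on the image and the PyJulia bindings installed by pip_install("julia"), a function can call into Julia roughly as follows (a usage sketch under the assumption that the bindings initialize cleanly in this image; it is not code from this PR):

from julia import Main  # PyJulia entry point to the Julia runtime

# Evaluate a Julia expression and receive the result as a Python object.
total = Main.eval("sum(1:10)")
print(total)  # 55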
4 changes: 2 additions & 2 deletions 10_integrations/stable_diffusion_slackbot.py
@@ -35,7 +35,7 @@
 # Next, [create a HuggingFace access token](https://huggingface.co/settings/tokens).
 # To access the token in a Modal function, we can create a secret on the
 # [secrets page](https://modal.com/secrets). Let's use the environment variable
-# named `HUGGINGFACE_TOKEN`. Functions that inject this secret will have access
+# named `HF_TOKEN`. Functions that inject this secret will have access
 # to the environment variable.
 #
 # ![create a huggingface token](./huggingface_token.png)
@@ -57,7 +57,7 @@ def fetch_model(local_files_only: bool = False):

     return StableDiffusionPipeline.from_pretrained(
         "runwayml/stable-diffusion-v1-5",
-        use_auth_token=os.environ["HUGGINGFACE_TOKEN"],
+        use_auth_token=os.environ["HF_TOKEN"],
         variant="fp16",
         torch_dtype=float16,
         device_map="auto",
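
For context, the pipeline returned by fetch_model would be used along these lines (a sketch of standard diffusers usage, not code from this PR; the prompt and filename are made up):

pipe = fetch_model()

# The pipeline returns a batch of images; take the first one.
image = pipe("a watercolor painting of a lighthouse at dawn").images[0]
image.save("lighthouse.png")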