update hf token env var name (#1321)
dakinggg authored Jul 1, 2024
1 parent 80ba2b9 commit 73be9d3
Showing 8 changed files with 17 additions and 19 deletions.
README.md: 2 changes (1 addition & 1 deletion)
@@ -264,7 +264,7 @@ Note: the `composer` command used above to train the model refers to the [Compos
 If you have a write-enabled [HuggingFace auth token](https://huggingface.co/docs/hub/security-tokens), you can optionally upload your model to the Hub! Just export your token like this:
 
 ```bash
-export HUGGING_FACE_HUB_TOKEN=your-auth-token
+export HF_TOKEN=your-auth-token
 ```
 
 and uncomment the line containing `--hf_repo_for_upload ...` in the above call to `inference/convert_composer_to_hf.py`.
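
For reference, the upload step the README describes would now look roughly like this; only `HF_TOKEN`, `--hf_repo_for_upload`, and the script path come from this diff, while the checkpoint and output arguments are illustrative assumptions:

```bash
# Export a write-enabled token under its new name (replaces HUGGING_FACE_HUB_TOKEN).
export HF_TOKEN=your-auth-token

# Hypothetical invocation: argument names other than --hf_repo_for_upload are
# assumptions about the conversion script's interface, not part of this commit.
python inference/convert_composer_to_hf.py \
  --composer_path checkpoints/latest-rank0.pt \
  --hf_output_path ./hf-export \
  --hf_repo_for_upload your-org/your-model
```
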
llmfoundry/utils/model_download_utils.py: 2 changes (1 addition & 1 deletion)
@@ -75,7 +75,7 @@ def download_from_hf_hub(
             available. Defaults to True.
         tokenizer_only (bool): If true, only download tokenizer files.
         token (str, optional): The HuggingFace API token. If not provided, the token will be read from the
-            `HUGGING_FACE_HUB_TOKEN` environment variable.
+            `HF_TOKEN` environment variable.
 
     Raises:
         RepositoryNotFoundError: If the model repo doesn't exist or the token is unauthorized.
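
As a minimal sketch of what the updated docstring implies for callers that do not pass `token` explicitly, the value can be read from `HF_TOKEN` and forwarded to the Hub client; `snapshot_download` and the repo id below are illustrative stand-ins, not code from this commit:

```python
import os

from huggingface_hub import snapshot_download

# Read the token from the renamed environment variable (None if unset,
# in which case only public repos are accessible).
token = os.getenv('HF_TOKEN')

# Illustrative download of a gated repo mentioned elsewhere in this commit.
local_path = snapshot_download(repo_id='meta-llama/Llama-2-7b-hf', token=token)
print(f'Downloaded to {local_path}')
```
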
mcli/mcli-llama2-finetune.yaml: 2 changes (1 addition & 1 deletion)
@@ -36,7 +36,7 @@ parameters:
     init_device: mixed
     pretrained_model_name_or_path: meta-llama/Llama-2-7b-hf
     pretrained: true
-    # Note: you must have set the HUGGING_FACE_HUB_TOKEN environment variable and have access to the llama2 models
+    # Note: you must have set the HF_TOKEN environment variable and have access to the llama2 models
     use_auth_token: true
     use_flash_attention_2: true
 
scripts/inference/hf_chat.py: 8 changes (2 additions & 6 deletions)
@@ -364,9 +364,7 @@ def main(args: Namespace) -> None:
     except Exception as e:
         raise RuntimeError(
             'If you are having auth problems, try logging in via `huggingface-cli login` '
-            +
-            'or by setting the environment variable `export HUGGING_FACE_HUB_TOKEN=... '
-            +
+            + 'or by setting the environment variable `export HF_TOKEN=... ' +
             'using your access token from https://huggingface.co/settings/tokens.',
         ) from e

@@ -389,9 +387,7 @@ def main(args: Namespace) -> None:
         raise RuntimeError(
             'Unable to load HF model. ' +
             'If you are having auth problems, try logging in via `huggingface-cli login` '
-            +
-            'or by setting the environment variable `export HUGGING_FACE_HUB_TOKEN=... '
-            +
+            + 'or by setting the environment variable `export HF_TOKEN=... ' +
             'using your access token from https://huggingface.co/settings/tokens.',
         ) from e
 
scripts/inference/hf_generate.py: 6 changes (2 additions & 4 deletions)
@@ -200,7 +200,7 @@ def main(args: Namespace) -> None:
     except Exception as e:
         raise RuntimeError(
             'If you are having auth problems, try logging in via `huggingface-cli login` ' +\
-            'or by setting the environment variable `export HUGGING_FACE_HUB_TOKEN=... ' +\
+            'or by setting the environment variable `export HF_TOKEN=... ' +\
             'using your access token from https://huggingface.co/settings/tokens.',
         ) from e

@@ -236,9 +236,7 @@ def main(args: Namespace) -> None:
         raise RuntimeError(
             'Unable to load HF model. ' +
             'If you are having auth problems, try logging in via `huggingface-cli login` '
-            +
-            'or by setting the environment variable `export HUGGING_FACE_HUB_TOKEN=... '
-            +
+            + 'or by setting the environment variable `export HF_TOKEN=... ' +
             'using your access token from https://huggingface.co/settings/tokens.',
         ) from e
 
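
The error text in both scripts offers the same two remedies; run either of these before launching the script, substituting a real token from https://huggingface.co/settings/tokens:

```bash
# Option 1: interactive login via the Hugging Face CLI
huggingface-cli login

# Option 2: export the token under its new variable name
export HF_TOKEN=your-auth-token
```
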
scripts/misc/download_model.py: 8 changes (6 additions & 2 deletions)
@@ -27,7 +27,8 @@
     download_from_oras,
 )
 
-HF_TOKEN_ENV_VAR = 'HUGGING_FACE_HUB_TOKEN'
+DEPRECATED_HF_TOKEN_ENV_VAR = 'HUGGING_FACE_HUB_TOKEN'
+HF_TOKEN_ENV_VAR = 'HF_TOKEN'
 
 logging.basicConfig(
     format=f'%(asctime)s: %(levelname)s: %(name)s: %(message)s',
@@ -42,7 +43,10 @@ def add_hf_parser_arguments(parser: argparse.ArgumentParser) -> None:
     parser.add_argument(
         '--token',
         type=str,
-        default=os.getenv(HF_TOKEN_ENV_VAR),
+        default=os.getenv(
+            HF_TOKEN_ENV_VAR,
+            os.getenv(DEPRECATED_HF_TOKEN_ENV_VAR),
+        ),
     )
 
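
The new argparse default prefers the renamed variable while still honoring the deprecated one. A standalone sketch of that lookup order (token values are hypothetical):

```python
import os

DEPRECATED_HF_TOKEN_ENV_VAR = 'HUGGING_FACE_HUB_TOKEN'
HF_TOKEN_ENV_VAR = 'HF_TOKEN'

# os.getenv(key, default) returns the default only when `key` is unset,
# so HF_TOKEN wins whenever both variables are present.
os.environ[DEPRECATED_HF_TOKEN_ENV_VAR] = 'old-token'
print(os.getenv(HF_TOKEN_ENV_VAR, os.getenv(DEPRECATED_HF_TOKEN_ENV_VAR)))  # old-token

os.environ[HF_TOKEN_ENV_VAR] = 'new-token'
print(os.getenv(HF_TOKEN_ENV_VAR, os.getenv(DEPRECATED_HF_TOKEN_ENV_VAR)))  # new-token
```
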
tests/a_scripts/inference/test_convert_composer_to_hf.py: 4 changes (2 additions & 2 deletions)
@@ -533,7 +533,7 @@ def _get_model_and_tokenizer(
         tokenizer_name = 'EleutherAI/gpt-neo-125M'
     elif model == 'llama2':
         assert tie_word_embeddings is None
-        if 'HUGGING_FACE_HUB_TOKEN' not in os.environ:
+        if 'HF_TOKEN' not in os.environ:
             pytest.skip(
                 'The CI cluster does not have access to the Llama models, so skip this test.',
             )
@@ -985,7 +985,7 @@ def test_convert_and_generate(
         om_cfg['model']['config_overrides']['hidden_size'] = 36
     elif model == 'llama2':
         assert tie_word_embeddings is None
-        if 'HUGGING_FACE_HUB_TOKEN' not in os.environ:
+        if 'HF_TOKEN' not in os.environ:
             pytest.skip(
                 'The CI cluster does not have access to the Llama models, so skip this test.',
             )
 
tests/models/hf/test_hf_config.py: 4 changes (2 additions & 2 deletions)
@@ -172,7 +172,7 @@ def test_hf_config_override(
 
 
 @pytest.mark.skipif(
-    'HUGGING_FACE_HUB_TOKEN' not in os.environ,
+    'HF_TOKEN' not in os.environ,
     reason='CI does not have access to llama2',
 )
 def test_rope_scaling_override():
@@ -205,7 +205,7 @@ def test_rope_scaling_override():
 
 
 @pytest.mark.skipif(
-    'HUGGING_FACE_HUB_TOKEN' not in os.environ,
+    'HF_TOKEN' not in os.environ,
     reason='CI does not have access to Dbrx',
 )
 def test_nested_override():
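
With both test files now gating on `HF_TOKEN`, running the gated tests locally looks roughly like this; the `-k` selection is illustrative and assumes a token with access to the gated repos:

```bash
# Without HF_TOKEN set, the llama2/Dbrx-gated tests are skipped.
export HF_TOKEN=your-auth-token
pytest tests/models/hf/test_hf_config.py -k "rope_scaling_override or nested_override"
```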
