diff --git a/.github/workflows/smoketest.yaml b/.github/workflows/smoketest.yaml
new file mode 100644
index 0000000000..0bf3968753
--- /dev/null
+++ b/.github/workflows/smoketest.yaml
@@ -0,0 +1,41 @@
+name: Smoketest
+on:
+  push:
+    branches:
+    - main
+    - release/*
+  pull_request:
+    branches:
+    - main
+    - release/*
+  workflow_dispatch:
+# Cancel old runs when a new commit is pushed to the same branch if not on main or dev
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: ${{ github.ref != 'refs/heads/main' && github.ref != 'refs/heads/dev' }}
+defaults:
+  run:
+    working-directory: .
+jobs:
+  smoketest:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 10
+    strategy:
+      matrix:
+        python_version:
+        - "3.9"
+        - "3.10"
+    steps:
+    - uses: actions/checkout@v3
+    - uses: actions/setup-python@v4
+      with:
+        python-version: ${{ matrix.python_version }}
+    - name: Setup
+      run: |
+        set -ex
+        python -m pip install --upgrade 'pip<23' wheel
+        python -m pip install --upgrade .
+        python -m pip install pytest==7.2.1 pytest_codeblocks==0.16.1
+    - name: Run checks
+      run: |
+        pytest tests/test_smoketest.py
diff --git a/llmfoundry/models/inference_api_wrapper/openai_causal_lm.py b/llmfoundry/models/inference_api_wrapper/openai_causal_lm.py
index 7257b98bd8..39de2ba59c 100644
--- a/llmfoundry/models/inference_api_wrapper/openai_causal_lm.py
+++ b/llmfoundry/models/inference_api_wrapper/openai_causal_lm.py
@@ -23,9 +23,11 @@
     'OpenAICausalLMEvalWrapper',
     'OpenAIChatAPIEvalWrapper',
 ]
-from openai.types.chat.chat_completion import ChatCompletion
-from openai.types.completion import Completion
-from openai.types.completion_choice import Logprobs
+
+if TYPE_CHECKING:
+    from openai.types.chat.chat_completion import ChatCompletion
+    from openai.types.completion import Completion
+    from openai.types.completion_choice import Logprobs
 
 MAX_RETRIES = 10
 
@@ -99,7 +101,7 @@ def __init__(self, model_cfg: Dict, tokenizer: AutoTokenizer) -> None:
                 'role':
                     'system',
                 'content':
-                    model_cfg.get('sytsem_role_prompt',
+                    model_cfg.get('system_role_prompt',
                                   'Please complete the following text: ')
             }, {
                 'role': 'user',
@@ -201,7 +203,7 @@ def eval_forward(self, batch: Batch, outputs: Optional[Any] = None):
 
         return torch.stack(output_logits_batch).to(batch['input_ids'].device)
 
-    def process_result(self, completion: Optional[ChatCompletion]):
+    def process_result(self, completion: Optional['ChatCompletion']):
         if completion is None:
             raise ValueError("Couldn't generate model output")
 
@@ -234,7 +236,7 @@ def __init__(self, model_cfg: Dict, tokenizer: AutoTokenizer) -> None:
                                                       logprobs=5,
                                                       temperature=0.0)
 
-    def process_result(self, completion: Optional[Completion]):
+    def process_result(self, completion: Optional['Completion']):
         if completion is None:
             raise ValueError("Couldn't generate model output")
 
diff --git a/tests/test_smoketest.py b/tests/test_smoketest.py
new file mode 100644
index 0000000000..a43925e506
--- /dev/null
+++ b/tests/test_smoketest.py
@@ -0,0 +1,16 @@
+# Copyright 2022 MosaicML LLM Foundry authors
+# SPDX-License-Identifier: Apache-2.0
+
+from llmfoundry import callbacks, data, models, optim, tokenizers, utils
+
+
+# This very simple test is just to use the above imports, which check and make sure we can import all the top-level
+# modules from foundry. This is mainly useful for checking that we have correctly conditionally imported all optional
+# dependencies.
+def test_smoketest():
+    assert callbacks
+    assert data
+    assert models
+    assert optim
+    assert tokenizers
+    assert utils
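
For context on the `openai_causal_lm.py` change above: moving the `openai.types` imports under `typing.TYPE_CHECKING` and quoting the annotations is what keeps the module importable when the optional `openai` package is not installed, which is exactly the property the new smoketest exercises by importing the top-level packages. Below is a minimal standalone sketch of that pattern, not part of the diff; `optional_pkg` and `HeavyType` are hypothetical placeholder names.

```python
from typing import TYPE_CHECKING, Optional

if TYPE_CHECKING:
    # Only evaluated by static type checkers; skipped at runtime, so this
    # module imports cleanly even when the optional package is missing.
    from optional_pkg import HeavyType  # hypothetical optional dependency


def process_result(result: Optional['HeavyType']) -> str:
    # The quoted annotation is a forward reference: it stays a string at
    # runtime, so nothing here ever triggers the import above.
    if result is None:
        raise ValueError("Couldn't generate model output")
    return f'got {result!r}'
```

With imports gated this way, the smoketest only needs to import each top-level `llmfoundry` package to catch any optional dependency that has leaked back into module scope.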