diff --git a/06_gpu_and_ml/batch_inference/batch_inference_using_huggingface.py b/06_gpu_and_ml/batch_inference/batch_inference_using_huggingface.py
index a9e735942..45a35f3aa 100644
--- a/06_gpu_and_ml/batch_inference/batch_inference_using_huggingface.py
+++ b/06_gpu_and_ml/batch_inference/batch_inference_using_huggingface.py
@@ -184,5 +184,5 @@ def main():
 # Every container downloads the model when it starts, which is a bit inefficient.
 # In order to improve this, what you could do is store the model in the image that
 # backs each container.
-# See [`Image.run_function`](/docs/guide/custom-container#running-a-function-as-a-build-step-beta).
+# See [`Image.run_function`](/docs/guide/custom-container#run-a-modal-function-during-your-build-with-run_function-beta).
 #
diff --git a/06_gpu_and_ml/llm-serving/falcon_gptq.py b/06_gpu_and_ml/llm-serving/falcon_gptq.py
index 114b2eeca..e61cdc378 100644
--- a/06_gpu_and_ml/llm-serving/falcon_gptq.py
+++ b/06_gpu_and_ml/llm-serving/falcon_gptq.py
@@ -34,7 +34,7 @@ def download_model():
 
 # Now, we define our image. We'll use the `debian-slim` base image, and install the dependencies we need
 # using [`pip_install`](/docs/reference/modal.Image#pip_install). At the end, we'll use
-# [`run_function`](/docs/guide/custom-container#running-a-function-as-a-build-step-beta) to run the
+# [`run_function`](/docs/guide/custom-container#run-a-modal-function-during-your-build-with-run_function-beta) to run the
 # function defined above as part of the image build.
 
 image = (
diff --git a/06_gpu_and_ml/llm-serving/openllama.py b/06_gpu_and_ml/llm-serving/openllama.py
index a1a610240..d6f124ca3 100644
--- a/06_gpu_and_ml/llm-serving/openllama.py
+++ b/06_gpu_and_ml/llm-serving/openllama.py
@@ -33,7 +33,7 @@ def download_models():
 
 # Now, we define our image. We'll use the `debian-slim` base image, and install the dependencies we need
 # using [`pip_install`](/docs/reference/modal.Image#pip_install). At the end, we'll use
-# [`run_function`](/docs/guide/custom-container#running-a-function-as-a-build-step-beta) to run the
+# [`run_function`](/docs/guide/custom-container#run-a-modal-function-during-your-build-with-run_function-beta) to run the
 # function defined above as part of the image build.
 
 image = (
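
Note (not part of the patch): the link retargeted above documents the pattern these examples rely on, baking model weights into the image by running a download function at build time via `Image.run_function`. A minimal sketch of that pattern is below; the model id and package pins are illustrative placeholders, not taken from the patched files.

```python
import modal

def download_model():
    # Runs once during the image build, so containers start with the
    # weights already present instead of downloading them on startup.
    from huggingface_hub import snapshot_download

    snapshot_download("openlm-research/open_llama_7b")  # placeholder model id

image = (
    modal.Image.debian_slim()
    .pip_install("huggingface_hub", "transformers", "torch")
    .run_function(download_model)  # the build step the docs link describes
)
```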