diff --git a/README.md b/README.md
index 9cc8e3aba41..000aa524920 100644
--- a/README.md
+++ b/README.md
@@ -33,7 +33,7 @@ To use [🤗 text-generation-inference](https://github.com/huggingface/text-gene
 
 1. Pull the official Docker image with:
    ```bash
-   docker pull ghcr.io/huggingface/tgi-gaudi:2.0.4
+   docker pull ghcr.io/huggingface/tgi-gaudi:2.0.5
    ```
 > [!NOTE]
 > Alternatively, you can build the Docker image using the `Dockerfile` located in this folder with:
@@ -48,7 +48,7 @@ To use [🤗 text-generation-inference](https://github.com/huggingface/text-gene
    hf_token=YOUR_ACCESS_TOKEN
    volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
 
-   docker run -p 8080:80 -v $volume:/data --runtime=habana -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none -e HUGGING_FACE_HUB_TOKEN=$hf_token -e ENABLE_HPU_GRAPH=true -e LIMIT_HPU_GRAPH=true -e USE_FLASH_ATTENTION=true -e FLASH_ATTENTION_RECOMPUTE=true --cap-add=sys_nice --ipc=host ghcr.io/huggingface/tgi-gaudi:2.0.4 --model-id $model --max-input-tokens 1024 --max-total-tokens 2048
+   docker run -p 8080:80 -v $volume:/data --runtime=habana -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none -e HUGGING_FACE_HUB_TOKEN=$hf_token -e ENABLE_HPU_GRAPH=true -e LIMIT_HPU_GRAPH=true -e USE_FLASH_ATTENTION=true -e FLASH_ATTENTION_RECOMPUTE=true --cap-add=sys_nice --ipc=host ghcr.io/huggingface/tgi-gaudi:2.0.5 --model-id $model --max-input-tokens 1024 --max-total-tokens 2048
    ```
    > For gated models such as [StarCoder](https://huggingface.co/bigcode/starcoder), you will have to pass `-e HUGGING_FACE_HUB_TOKEN=<token>` to the `docker run` command above with a valid Hugging Face Hub read token.
 
@@ -58,7 +58,7 @@ To use [🤗 text-generation-inference](https://github.com/huggingface/text-gene
    hf_token=YOUR_ACCESS_TOKEN
    volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
 
-   docker run -p 8080:80 -v $volume:/data --runtime=habana -e HABANA_VISIBLE_DEVICES=all -e PT_HPU_LAZY_MODE=0 -e OMPI_MCA_btl_vader_single_copy_mechanism=none -e HUGGING_FACE_HUB_TOKEN=$hf_token --cap-add=sys_nice --ipc=host ghcr.io/huggingface/tgi-gaudi:2.0.4 --model-id $model --max-input-tokens 1024 --max-total-tokens 2048
+   docker run -p 8080:80 -v $volume:/data --runtime=habana -e HABANA_VISIBLE_DEVICES=all -e PT_HPU_LAZY_MODE=0 -e OMPI_MCA_btl_vader_single_copy_mechanism=none -e HUGGING_FACE_HUB_TOKEN=$hf_token --cap-add=sys_nice --ipc=host ghcr.io/huggingface/tgi-gaudi:2.0.5 --model-id $model --max-input-tokens 1024 --max-total-tokens 2048
    ```
 
    iii. On 8 Gaudi cards:
@@ -67,7 +67,7 @@ To use [🤗 text-generation-inference](https://github.com/huggingface/text-gene
    hf_token=YOUR_ACCESS_TOKEN
    volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
 
-   docker run -p 8080:80 -v $volume:/data --runtime=habana -e PT_HPU_ENABLE_LAZY_COLLECTIVES=true -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none -e HUGGING_FACE_HUB_TOKEN=$hf_token -e ENABLE_HPU_GRAPH=true -e LIMIT_HPU_GRAPH=true -e USE_FLASH_ATTENTION=true -e FLASH_ATTENTION_RECOMPUTE=true --cap-add=sys_nice --ipc=host ghcr.io/huggingface/tgi-gaudi:2.0.4 --model-id $model --sharded true --num-shard 8 --max-input-tokens 1024 --max-total-tokens 2048
+   docker run -p 8080:80 -v $volume:/data --runtime=habana -e PT_HPU_ENABLE_LAZY_COLLECTIVES=true -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none -e HUGGING_FACE_HUB_TOKEN=$hf_token -e ENABLE_HPU_GRAPH=true -e LIMIT_HPU_GRAPH=true -e USE_FLASH_ATTENTION=true -e FLASH_ATTENTION_RECOMPUTE=true --cap-add=sys_nice --ipc=host ghcr.io/huggingface/tgi-gaudi:2.0.5 --model-id $model --sharded true --num-shard 8 --max-input-tokens 1024 --max-total-tokens 2048
    ```
 3. You can then send a simple request:
    ```bash
@@ -137,7 +137,7 @@ docker run -p 8080:80 \
    -e FLASH_ATTENTION_RECOMPUTE=true \
    --cap-add=sys_nice \
    --ipc=host \
-   ghcr.io/huggingface/tgi-gaudi:2.0.4 \
+   ghcr.io/huggingface/tgi-gaudi:2.0.5 \
    --model-id $model \
    --max-input-length 1024 --max-total-tokens 2048 \
    --max-batch-prefill-tokens 2048 --max-batch-total-tokens 65536 \
@@ -169,7 +169,7 @@ docker run -p 8080:80 \
    -e FLASH_ATTENTION_RECOMPUTE=true \
    --cap-add=sys_nice \
    --ipc=host \
-   ghcr.io/huggingface/tgi-gaudi:2.0.4 \
+   ghcr.io/huggingface/tgi-gaudi:2.0.5 \
    --model-id $model \
    --sharded true --num-shard 8 \
    --max-input-length 1024 --max-total-tokens 2048 \
@@ -201,7 +201,7 @@ docker run -p 8080:80 \
    -e FLASH_ATTENTION_RECOMPUTE=true \
    --cap-add=sys_nice \
    --ipc=host \
-   ghcr.io/huggingface/tgi-gaudi:2.0.4 \
+   ghcr.io/huggingface/tgi-gaudi:2.0.5 \
    --model-id $model \
    --max-input-length 1024 --max-total-tokens 2048 \
    --max-batch-prefill-tokens 2048 --max-batch-total-tokens 65536 \
@@ -233,7 +233,7 @@ docker run -p 8080:80 \
    -e FLASH_ATTENTION_RECOMPUTE=true \
    --cap-add=sys_nice \
    --ipc=host \
-   ghcr.io/huggingface/tgi-gaudi:2.0.4 \
+   ghcr.io/huggingface/tgi-gaudi:2.0.5 \
    --model-id $model \
    --sharded true --num-shard 8 \
    --max-input-length 1024 --max-total-tokens 2048 \
@@ -265,7 +265,7 @@ docker run -p 8080:80 \
    -e BATCH_BUCKET_SIZE=1 \
    --cap-add=sys_nice \
    --ipc=host \
-   ghcr.io/huggingface/tgi-gaudi:2.0.4 \
+   ghcr.io/huggingface/tgi-gaudi:2.0.5 \
    --model-id $model \
    --max-input-tokens 4096 --max-batch-prefill-tokens 16384 \
    --max-total-tokens 8192 --max-batch-total-tokens 32768
@@ -316,7 +316,7 @@ docker run -p 8080:80 \
    -e FLASH_ATTENTION_RECOMPUTE=true \
    --cap-add=sys_nice \
    --ipc=host \
-   ghcr.io/huggingface/tgi-gaudi:2.0.4 \
+   ghcr.io/huggingface/tgi-gaudi:2.0.5 \
    --model-id $model \
    --max-input-length 1024 --max-total-tokens 2048 \
    --max-batch-prefill-tokens 2048 --max-batch-total-tokens 65536 \
@@ -351,7 +351,7 @@ docker run -p 8080:80 \
    -e FLASH_ATTENTION_RECOMPUTE=true \
    --cap-add=sys_nice \
    --ipc=host \
-   ghcr.io/huggingface/tgi-gaudi:2.0.4 \
+   ghcr.io/huggingface/tgi-gaudi:2.0.5 \
    --model-id $model \
    --sharded true --num-shard 8 \
    --max-input-length 1024 --max-total-tokens 2048 \
@@ -387,7 +387,7 @@ docker run -p 8080:80 \
    -e FLASH_ATTENTION_RECOMPUTE=true \
    --cap-add=sys_nice \
    --ipc=host \
-   ghcr.io/huggingface/tgi-gaudi:2.0.4 \
+   ghcr.io/huggingface/tgi-gaudi:2.0.5 \
    --model-id $model \
    --max-input-length 1024 --max-total-tokens 2048 \
    --max-batch-prefill-tokens 2048 --max-batch-total-tokens 65536 \
@@ -422,7 +422,7 @@ docker run -p 8080:80 \
    -e FLASH_ATTENTION_RECOMPUTE=true \
    --cap-add=sys_nice \
    --ipc=host \
-   ghcr.io/huggingface/tgi-gaudi:2.0.4 \
+   ghcr.io/huggingface/tgi-gaudi:2.0.5 \
    --model-id $model \
    --sharded true --num-shard 8 \
    --max-input-length 1024 --max-total-tokens 2048 \
@@ -455,7 +455,7 @@ docker run -p 8080:80 \
    -e BATCH_BUCKET_SIZE=1 \
    --cap-add=sys_nice \
    --ipc=host \
-   ghcr.io/huggingface/tgi-gaudi:2.0.4 \
+   ghcr.io/huggingface/tgi-gaudi:2.0.5 \
    --model-id $model \
    --max-input-tokens 4096 --max-batch-prefill-tokens 16384 \
    --max-total-tokens 8192 --max-batch-total-tokens 32768
@@ -486,7 +486,7 @@ docker run -p 8080:80 \
    -e BATCH_BUCKET_SIZE=1 \
    --cap-add=sys_nice \
    --ipc=host \
-   ghcr.io/huggingface/tgi-gaudi:2.0.4 \
+   ghcr.io/huggingface/tgi-gaudi:2.0.5 \
    --model-id $model \
    --sharded true --num-shard 8 \
    --max-input-tokens 4096 --max-batch-prefill-tokens 16384 \
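A quick way to sanity-check a container started from the bumped `2.0.5` image is the simple request from step 3 of the README. The request body itself sits outside the hunks above, so the snippet below is a minimal sketch assuming TGI's standard `/generate` endpoint; the prompt text and `max_new_tokens` value are illustrative, not part of this diff:

```bash
# Smoke-test the server launched by any of the `docker run` commands above.
# /generate is TGI's standard text-generation endpoint; the prompt and
# max_new_tokens below are arbitrary example values.
curl 127.0.0.1:8080/generate \
    -X POST \
    -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":32}}' \
    -H 'Content-Type: application/json'
```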