feat(conda): Add separate env for vllm (#1148)
**Description**

This PR is related to #1117

**Notes for Reviewers**

* The gRPC server can be started as normal
* The test case can be triggered in VSCode
* As with other PRs of this kind, this adds a `vllm.yml` and a Makefile under `extra/grpc/vllm`, points the `vllm` entry in the main Dockerfile at `run.sh`, and adds the corresponding command to the main Makefile

**[Signed
commits](../CONTRIBUTING.md#signing-off-on-commits-developer-certificate-of-origin)**
- [x] Yes, I signed my commits.


Signed-off-by: GitHub <[email protected]>
Signed-off-by: Ettore Di Giacinto <[email protected]>
Aisuko authored and mudler committed Oct 19, 2023
1 parent c40d924 commit 2711f79
Showing 8 changed files with 229 additions and 7 deletions.
2 changes: 1 addition & 1 deletion Dockerfile
@@ -14,7 +14,7 @@ ARG TARGETARCH
ARG TARGETVARIANT

ENV BUILD_TYPE=${BUILD_TYPE}
ENV EXTERNAL_GRPC_BACKENDS="huggingface-embeddings:/build/extra/grpc/huggingface/huggingface.py,autogptq:/build/extra/grpc/autogptq/run.sh,bark:/build/extra/grpc/bark/run.sh,diffusers:/build/extra/grpc/diffusers/run.sh,exllama:/build/extra/grpc/exllama/exllama.py,vall-e-x:/build/extra/grpc/vall-e-x/ttsvalle.py,vllm:/build/extra/grpc/vllm/backend_vllm.py"
ENV EXTERNAL_GRPC_BACKENDS="huggingface-embeddings:/build/extra/grpc/huggingface/huggingface.py,autogptq:/build/extra/grpc/autogptq/run.sh,bark:/build/extra/grpc/bark/run.sh,diffusers:/build/extra/grpc/diffusers/run.sh,exllama:/build/extra/grpc/exllama/exllama.py,vall-e-x:/build/extra/grpc/vall-e-x/ttsvalle.py,vllm:/build/extra/grpc/vllm/run.sh"
ENV GALLERIES='[{"name":"model-gallery", "url":"github:go-skynet/model-gallery/index.yaml"}, {"url": "github:go-skynet/model-gallery/huggingface.yaml","name":"huggingface"}]'
ARG GO_TAGS="stablediffusion tts"

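For context, `EXTERNAL_GRPC_BACKENDS` is a comma-separated list of `name:launcher` pairs, and this change points the `vllm` entry at the conda-aware `run.sh` wrapper instead of launching `backend_vllm.py` directly. A minimal sketch of how such a registry string can be split into a name-to-launcher map (illustrative only, not LocalAI's actual loader code):

```python
import os

def parse_external_backends(value: str) -> dict[str, str]:
    """Split 'name:path,name:path' into a {name: path} mapping."""
    backends = {}
    for entry in value.split(","):
        if not entry.strip():
            continue
        # partition on the first colon: the name can't contain one,
        # and the launcher paths here don't either
        name, _, path = entry.partition(":")
        backends[name.strip()] = path.strip()
    return backends

if __name__ == "__main__":
    mapping = parse_external_backends(os.environ.get("EXTERNAL_GRPC_BACKENDS", ""))
    print(mapping.get("vllm"))  # e.g. /build/extra/grpc/vllm/run.sh
```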
1 change: 1 addition & 0 deletions Makefile
@@ -413,6 +413,7 @@ prepare-extra-conda-environments:
$(MAKE) -C extra/grpc/autogptq
$(MAKE) -C extra/grpc/bark
$(MAKE) -C extra/grpc/diffusers
$(MAKE) -C extra/grpc/vllm

backend-assets/grpc:
mkdir -p backend-assets/grpc
11 changes: 11 additions & 0 deletions extra/grpc/vllm/Makefile
@@ -0,0 +1,11 @@
.PHONY: vllm
vllm:
	@echo "Creating virtual environment..."
	@conda env create --name vllm --file vllm.yml
	@echo "Virtual environment created."

.PHONY: run
run:
	@echo "Running vllm..."
	bash run.sh
	@echo "vllm run."
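With these targets in place, `make -C extra/grpc/vllm vllm` creates the conda environment from `vllm.yml`, and `make -C extra/grpc/vllm run` starts the backend through `run.sh`; both assume a working conda install on the `PATH`.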
5 changes: 5 additions & 0 deletions extra/grpc/vllm/README.md
@@ -0,0 +1,5 @@
# Creating a separate environment for the vllm project

```
make vllm
```
67 changes: 61 additions & 6 deletions extra/grpc/vllm/backend_vllm.py
@@ -1,15 +1,15 @@
#!/usr/bin/env python3
import grpc
from concurrent import futures
import time
import backend_pb2
import backend_pb2_grpc
import argparse
import signal
import sys
import os, glob
import os

import backend_pb2
import backend_pb2_grpc

from pathlib import Path
import grpc
from vllm import LLM, SamplingParams

_ONE_DAY_IN_SECONDS = 60 * 60 * 24
@@ -19,7 +19,20 @@

# Implement the BackendServicer class with the service methods
class BackendServicer(backend_pb2_grpc.BackendServicer):
"""
A gRPC servicer that implements the Backend service defined in backend.proto.
"""
def generate(self,prompt, max_new_tokens):
"""
Generates text based on the given prompt and maximum number of new tokens.
Args:
prompt (str): The prompt to generate text from.
max_new_tokens (int): The maximum number of new tokens to generate.
Returns:
str: The generated text.
"""
self.generator.end_beam_search()

# Tokenizing the input
@@ -41,9 +54,31 @@ def generate(self,prompt, max_new_tokens):
if token.item() == self.generator.tokenizer.eos_token_id:
break
return decoded_text

def Health(self, request, context):
"""
Returns a health check message.
Args:
request: The health check request.
context: The gRPC context.
Returns:
backend_pb2.Reply: The health check reply.
"""
return backend_pb2.Reply(message=bytes("OK", 'utf-8'))

def LoadModel(self, request, context):
"""
Loads a language model.
Args:
request: The load model request.
context: The gRPC context.
Returns:
backend_pb2.Result: The load model result.
"""
try:
if request.Quantization != "":
self.llm = LLM(model=request.Model, quantization=request.Quantization)
@@ -54,6 +89,16 @@ def LoadModel(self, request, context):
return backend_pb2.Result(message="Model loaded successfully", success=True)

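As an aside, `LoadModel` is a thin wrapper around vLLM's offline `LLM` class, forwarding a quantization mode only when the request sets one. A standalone sketch of the same loading logic, assuming the vLLM Python API pinned in `vllm.yml` (`vllm==0.2.0`):

```python
# Sketch of the loading logic above, outside the gRPC servicer.
from vllm import LLM

def load_model(model: str, quantization: str = "") -> LLM:
    # `model` is a HuggingFace model id or local path; quantization
    # (e.g. "awq") is only forwarded when explicitly requested.
    if quantization:
        return LLM(model=model, quantization=quantization)
    return LLM(model=model)
```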
def Predict(self, request, context):
"""
Generates text based on the given prompt and sampling parameters.
Args:
request: The predict request.
context: The gRPC context.
Returns:
backend_pb2.Result: The predict result.
"""
if request.TopP == 0:
request.TopP = 0.9

@@ -68,6 +113,16 @@ def Predict(self, request, context):
return backend_pb2.Result(message=bytes(generated_text, encoding='utf-8'))

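`Predict` defaults `TopP` to 0.9 when unset, builds a `SamplingParams`, and runs one non-streaming generation. The equivalent call sequence looks roughly like this; only the `top_p` default is taken from the diff, the rest is a hedged reading of the `vllm==0.2.0` API:

```python
# Rough equivalent of a single Predict call.
from vllm import LLM, SamplingParams

def predict(llm: LLM, prompt: str, top_p: float = 0.0) -> str:
    if top_p == 0:
        top_p = 0.9  # same default the servicer applies
    outputs = llm.generate([prompt], SamplingParams(top_p=top_p))
    # generate() returns one RequestOutput per prompt; take the
    # first completion's text.
    return outputs[0].outputs[0].text
```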
def PredictStream(self, request, context):
"""
Generates text based on the given prompt and sampling parameters, and streams the results.
Args:
request: The predict stream request.
context: The gRPC context.
Returns:
backend_pb2.Result: The predict stream result.
"""
# Implement PredictStream RPC
#for reply in some_data_generator():
# yield reply
@@ -104,4 +159,4 @@ def signal_handler(sig, frame):
)
args = parser.parse_args()

serve(args.addr)
serve(args.addr)
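Note that `PredictStream` is still a stub in this commit. For reference, gRPC server-streaming handlers in Python are plain generators, so a future implementation could take a shape like the hypothetical sketch below; `request.Prompt` and the chunking of a finished completion are assumptions, and true token-by-token streaming would need vLLM's async engine rather than the offline `LLM` class:

```python
# Hypothetical shape for PredictStream; not part of this commit.
# Chunking a completed generation stands in for real token streaming.
def PredictStream(self, request, context):
    sampling = SamplingParams(top_p=request.TopP or 0.9)
    text = self.llm.generate([request.Prompt], sampling)[0].outputs[0].text
    for i in range(0, len(text), 16):
        # Reply is the same message type Health() returns above.
        yield backend_pb2.Reply(message=bytes(text[i:i + 16], encoding="utf-8"))
```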
10 changes: 10 additions & 0 deletions extra/grpc/vllm/run.sh
@@ -0,0 +1,10 @@
##
## A bash script wrapper that runs the vllm server with conda

# Activate conda environment
source activate vllm

# get the directory where the bash script is located
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

python "$DIR"/backend_vllm.py
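One caveat: `source activate vllm` relies on conda being initialized in the invoking shell, and since the main Dockerfile registers this wrapper directly as the `vllm` backend entry, the image needs conda's activation scripts on the `PATH` for this to succeed.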
41 changes: 41 additions & 0 deletions extra/grpc/vllm/test_backend_vllm.py
@@ -0,0 +1,41 @@
import unittest
import subprocess
import time

import grpc

import backend_pb2
import backend_pb2_grpc

class TestBackendServicer(unittest.TestCase):
    """
    TestBackendServicer is the class that tests the gRPC service.
    This class contains methods to test the startup and shutdown of the gRPC service.
    """
    def setUp(self):
        self.service = subprocess.Popen(["python", "backend_vllm.py", "--addr", "localhost:50051"])

    def tearDown(self) -> None:
        self.service.terminate()
        self.service.wait()

    def test_server_startup(self):
        time.sleep(2)
        try:
            self.setUp()
            with grpc.insecure_channel("localhost:50051") as channel:
                stub = backend_pb2_grpc.BackendStub(channel)
                response = stub.Health(backend_pb2.HealthMessage())
                self.assertEqual(response.message, b'OK')
        except Exception as err:
            print(err)
            self.fail("Server failed to start")
        finally:
            self.tearDown()
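The test is meant to be run from `extra/grpc/vllm` inside the `vllm` conda environment, e.g. `python -m unittest test_backend_vllm`, assuming the generated `backend_pb2` stubs are importable from that directory.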
99 changes: 99 additions & 0 deletions extra/grpc/vllm/vllm.yml
@@ -0,0 +1,99 @@
name: vllm
channels:
- defaults
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h7b6447c_0
- ca-certificates=2023.08.22=h06a4308_0
- ld_impl_linux-64=2.38=h1181459_1
- libffi=3.4.4=h6a678d5_0
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.11=h7f8727e_2
- pip=23.2.1=py311h06a4308_0
- python=3.11.5=h955ad1f_0
- readline=8.2=h5eee18b_0
- setuptools=68.0.0=py311h06a4308_0
- sqlite=3.41.2=h5eee18b_0
- tk=8.6.12=h1ccaba5_0
- wheel=0.41.2=py311h06a4308_0
- xz=5.4.2=h5eee18b_0
- zlib=1.2.13=h5eee18b_0
- pip:
- aiosignal==1.3.1
- anyio==3.7.1
- attrs==23.1.0
- certifi==2023.7.22
- charset-normalizer==3.3.0
- click==8.1.7
- cmake==3.27.6
- fastapi==0.103.2
- filelock==3.12.4
- frozenlist==1.4.0
- fsspec==2023.9.2
- grpcio==1.59.0
- h11==0.14.0
- httptools==0.6.0
- huggingface-hub==0.17.3
- idna==3.4
- jinja2==3.1.2
- jsonschema==4.19.1
- jsonschema-specifications==2023.7.1
- lit==17.0.2
- markupsafe==2.1.3
- mpmath==1.3.0
- msgpack==1.0.7
- networkx==3.1
- ninja==1.11.1
- numpy==1.26.0
- nvidia-cublas-cu11==11.10.3.66
- nvidia-cuda-cupti-cu11==11.7.101
- nvidia-cuda-nvrtc-cu11==11.7.99
- nvidia-cuda-runtime-cu11==11.7.99
- nvidia-cudnn-cu11==8.5.0.96
- nvidia-cufft-cu11==10.9.0.58
- nvidia-curand-cu11==10.2.10.91
- nvidia-cusolver-cu11==11.4.0.1
- nvidia-cusparse-cu11==11.7.4.91
- nvidia-nccl-cu11==2.14.3
- nvidia-nvtx-cu11==11.7.91
- packaging==23.2
- pandas==2.1.1
- protobuf==4.24.4
- psutil==5.9.5
- pyarrow==13.0.0
- pydantic==1.10.13
- python-dateutil==2.8.2
- python-dotenv==1.0.0
- pytz==2023.3.post1
- pyyaml==6.0.1
- ray==2.7.0
- referencing==0.30.2
- regex==2023.10.3
- requests==2.31.0
- rpds-py==0.10.4
- safetensors==0.4.0
- sentencepiece==0.1.99
- six==1.16.0
- sniffio==1.3.0
- starlette==0.27.0
- sympy==1.12
- tokenizers==0.14.1
- torch==2.0.1
- tqdm==4.66.1
- transformers==4.34.0
- triton==2.0.0
- typing-extensions==4.8.0
- tzdata==2023.3
- urllib3==2.0.6
- uvicorn==0.23.2
- uvloop==0.17.0
- vllm==0.2.0
- watchfiles==0.20.0
- websockets==11.0.3
- xformers==0.0.22
prefix: /opt/conda/envs/vllm
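Worth noting: the pinned stack targets CUDA 11 (`torch==2.0.1` plus the `nvidia-*-cu11` wheels), and the `prefix` assumes conda lives at `/opt/conda` inside the image.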
