diff --git a/.github/workflows/test-extra.yml b/.github/workflows/test-extra.yml index 84af1683ec09..c2824ca677e3 100644 --- a/.github/workflows/test-extra.yml +++ b/.github/workflows/test-extra.yml @@ -132,4 +132,88 @@ jobs: make -C backend/python/transformers-musicgen test - \ No newline at end of file + + tests-bark: + runs-on: ubuntu-latest + steps: + - name: Clone + uses: actions/checkout@v4 + with: + submodules: true + - name: Dependencies + run: | + sudo apt-get update + sudo apt-get install build-essential ffmpeg + curl https://repo.anaconda.com/pkgs/misc/gpgkeys/anaconda.asc | gpg --dearmor > conda.gpg && \ + sudo install -o root -g root -m 644 conda.gpg /usr/share/keyrings/conda-archive-keyring.gpg && \ + gpg --keyring /usr/share/keyrings/conda-archive-keyring.gpg --no-default-keyring --fingerprint 34161F5BF5EB1D4BFBBB8F0A8AEB4F8B29D82806 && \ + sudo /bin/bash -c 'echo "deb [arch=amd64 signed-by=/usr/share/keyrings/conda-archive-keyring.gpg] https://repo.anaconda.com/pkgs/misc/debrepo/conda stable main" > /etc/apt/sources.list.d/conda.list' && \ + sudo /bin/bash -c 'echo "deb [arch=amd64 signed-by=/usr/share/keyrings/conda-archive-keyring.gpg] https://repo.anaconda.com/pkgs/misc/debrepo/conda stable main" | tee -a /etc/apt/sources.list.d/conda.list' && \ + sudo apt-get update && \ + sudo apt-get install -y conda + sudo apt-get install -y ca-certificates cmake curl patch + sudo apt-get install -y libopencv-dev && sudo ln -s /usr/include/opencv4/opencv2 /usr/include/opencv2 + + sudo rm -rfv /usr/bin/conda || true + + - name: Test bark + run: | + export PATH=$PATH:/opt/conda/bin + make -C backend/python/bark + make -C backend/python/bark test + + + # Below tests needs GPU. 
Commented out for now + # TODO: Re-enable as soon as we have GPU nodes + # tests-vllm: + # runs-on: ubuntu-latest + # steps: + # - name: Clone + # uses: actions/checkout@v4 + # with: + # submodules: true + # - name: Dependencies + # run: | + # sudo apt-get update + # sudo apt-get install build-essential ffmpeg + # curl https://repo.anaconda.com/pkgs/misc/gpgkeys/anaconda.asc | gpg --dearmor > conda.gpg && \ + # sudo install -o root -g root -m 644 conda.gpg /usr/share/keyrings/conda-archive-keyring.gpg && \ + # gpg --keyring /usr/share/keyrings/conda-archive-keyring.gpg --no-default-keyring --fingerprint 34161F5BF5EB1D4BFBBB8F0A8AEB4F8B29D82806 && \ + # sudo /bin/bash -c 'echo "deb [arch=amd64 signed-by=/usr/share/keyrings/conda-archive-keyring.gpg] https://repo.anaconda.com/pkgs/misc/debrepo/conda stable main" > /etc/apt/sources.list.d/conda.list' && \ + # sudo /bin/bash -c 'echo "deb [arch=amd64 signed-by=/usr/share/keyrings/conda-archive-keyring.gpg] https://repo.anaconda.com/pkgs/misc/debrepo/conda stable main" | tee -a /etc/apt/sources.list.d/conda.list' && \ + # sudo apt-get update && \ + # sudo apt-get install -y conda + # sudo apt-get install -y ca-certificates cmake curl patch + # sudo apt-get install -y libopencv-dev && sudo ln -s /usr/include/opencv4/opencv2 /usr/include/opencv2 + # sudo rm -rfv /usr/bin/conda || true + # - name: Test vllm + # run: | + # export PATH=$PATH:/opt/conda/bin + # make -C backend/python/vllm + # make -C backend/python/vllm test + # tests-vallex: + # runs-on: ubuntu-latest + # steps: + # - name: Clone + # uses: actions/checkout@v4 + # with: + # submodules: true + # - name: Dependencies + # run: | + # sudo apt-get update + # sudo apt-get install build-essential ffmpeg + # curl https://repo.anaconda.com/pkgs/misc/gpgkeys/anaconda.asc | gpg --dearmor > conda.gpg && \ + # sudo install -o root -g root -m 644 conda.gpg /usr/share/keyrings/conda-archive-keyring.gpg && \ + # gpg --keyring /usr/share/keyrings/conda-archive-keyring.gpg 
--no-default-keyring --fingerprint 34161F5BF5EB1D4BFBBB8F0A8AEB4F8B29D82806 && \ + # sudo /bin/bash -c 'echo "deb [arch=amd64 signed-by=/usr/share/keyrings/conda-archive-keyring.gpg] https://repo.anaconda.com/pkgs/misc/debrepo/conda stable main" > /etc/apt/sources.list.d/conda.list' && \ + # sudo /bin/bash -c 'echo "deb [arch=amd64 signed-by=/usr/share/keyrings/conda-archive-keyring.gpg] https://repo.anaconda.com/pkgs/misc/debrepo/conda stable main" | tee -a /etc/apt/sources.list.d/conda.list' && \ + # sudo apt-get update && \ + # sudo apt-get install -y conda + # sudo apt-get install -y ca-certificates cmake curl patch + # sudo apt-get install -y libopencv-dev && sudo ln -s /usr/include/opencv4/opencv2 /usr/include/opencv2 + # sudo rm -rfv /usr/bin/conda || true + # - name: Test vall-e-x + # run: | + # export PATH=$PATH:/opt/conda/bin + # make -C backend/python/vall-e-x + # make -C backend/python/vall-e-x test diff --git a/backend/python/bark/Makefile b/backend/python/bark/Makefile index 03dc9921bc4d..f55c645dd093 100644 --- a/backend/python/bark/Makefile +++ b/backend/python/bark/Makefile @@ -3,3 +3,15 @@ ttsbark: @echo "Creating virtual environment..." @conda env create --name ttsbark --file ttsbark.yml @echo "Virtual environment created." + +.PHONY: run +run: + @echo "Running bark..." + bash run.sh + @echo "bark run." + +.PHONY: test +test: + @echo "Testing bark..." + bash test.sh + @echo "bark tested." 
diff --git a/backend/python/bark/test.py b/backend/python/bark/test.py new file mode 100644 index 000000000000..3a79dd00e83a --- /dev/null +++ b/backend/python/bark/test.py @@ -0,0 +1,81 @@ +""" +A test script to test the gRPC service +""" +import unittest +import subprocess +import time +import backend_pb2 +import backend_pb2_grpc + +import grpc + + +class TestBackendServicer(unittest.TestCase): + """ + TestBackendServicer is the class that tests the gRPC service + """ + def setUp(self): + """ + This method sets up the gRPC service by starting the server + """ + self.service = subprocess.Popen(["python3", "ttsbark.py", "--addr", "localhost:50051"]) + time.sleep(10) + + def tearDown(self) -> None: + """ + This method tears down the gRPC service by terminating the server + """ + self.service.terminate() + self.service.wait() + + def test_server_startup(self): + """ + This method tests if the server starts up successfully + """ + try: + self.setUp() + with grpc.insecure_channel("localhost:50051") as channel: + stub = backend_pb2_grpc.BackendStub(channel) + response = stub.Health(backend_pb2.HealthMessage()) + self.assertEqual(response.message, b'OK') + except Exception as err: + print(err) + self.fail("Server failed to start") + finally: + self.tearDown() + + def test_load_model(self): + """ + This method tests if the model is loaded successfully + """ + try: + self.setUp() + with grpc.insecure_channel("localhost:50051") as channel: + stub = backend_pb2_grpc.BackendStub(channel) + response = stub.LoadModel(backend_pb2.ModelOptions(Model="v2/en_speaker_4")) + self.assertTrue(response.success) + self.assertEqual(response.message, "Model loaded successfully") + except Exception as err: + print(err) + self.fail("LoadModel service failed") + finally: + self.tearDown() + + def test_tts(self): + """ + This method tests if the embeddings are generated successfully + """ + try: + self.setUp() + with grpc.insecure_channel("localhost:50051") as channel: + stub = 
backend_pb2_grpc.BackendStub(channel) + response = stub.LoadModel(backend_pb2.ModelOptions(Model="v2/en_speaker_4")) + self.assertTrue(response.success) + tts_request = backend_pb2.TTSRequest(text="80s TV news production music hit for tonight's biggest story") + tts_response = stub.TTS(tts_request) + self.assertIsNotNone(tts_response) + except Exception as err: + print(err) + self.fail("TTS service failed") + finally: + self.tearDown() \ No newline at end of file diff --git a/backend/python/bark/test.sh b/backend/python/bark/test.sh new file mode 100644 index 000000000000..33fd0c5a11de --- /dev/null +++ b/backend/python/bark/test.sh @@ -0,0 +1,11 @@ +#!/bin/bash +## +## A bash script wrapper that runs the bark server with conda + +# Activate conda environment +source activate ttsbark + +# get the directory where the bash script is located +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +python -m unittest $DIR/test.py \ No newline at end of file diff --git a/backend/python/bark/test_ttsbark.py b/backend/python/bark/test_ttsbark.py deleted file mode 100644 index 372df1678ec1..000000000000 --- a/backend/python/bark/test_ttsbark.py +++ /dev/null @@ -1,32 +0,0 @@ -import unittest -import subprocess -import time -import backend_pb2 -import backend_pb2_grpc - -import grpc - -class TestBackendServicer(unittest.TestCase): - """ - TestBackendServicer is the class that tests the gRPC service - """ - def setUp(self): - self.service = subprocess.Popen(["python3", "ttsbark.py", "--addr", "localhost:50051"]) - - def tearDown(self) -> None: - self.service.terminate() - self.service.wait() - - def test_server_startup(self): - time.sleep(2) - try: - self.setUp() - with grpc.insecure_channel("localhost:50051") as channel: - stub = backend_pb2_grpc.BackendStub(channel) - response = stub.Health(backend_pb2.HealthMessage()) - self.assertEqual(response.message, b'OK') - except Exception as err: - print(err) - self.fail("Server failed to start") - finally: - 
self.tearDown() diff --git a/backend/python/diffusers/diffusers.yml b/backend/python/diffusers/diffusers.yml index fb315ab0a44e..48e4e62b2c69 100644 --- a/backend/python/diffusers/diffusers.yml +++ b/backend/python/diffusers/diffusers.yml @@ -25,15 +25,15 @@ dependencies: - xz=5.4.2=h5eee18b_0 - zlib=1.2.13=h5eee18b_0 - pip: - - accelerate==0.23.0 + - accelerate>=0.11.0 - certifi==2023.7.22 - charset-normalizer==3.3.0 - compel==2.0.2 - - diffusers==0.21.4 + - diffusers==0.24.0 - filelock==3.12.4 - fsspec==2023.9.2 - grpcio==1.59.0 - - huggingface-hub==0.17.3 + - huggingface-hub>=0.19.4 - idna==3.4 - importlib-metadata==6.8.0 - jinja2==3.1.2 @@ -63,10 +63,9 @@ dependencies: - requests==2.31.0 - safetensors==0.4.0 - sympy==1.12 - - tokenizers==0.14.1 - torch==2.1.0 - tqdm==4.66.1 - - transformers==4.34.0 + - transformers>=4.25.1 - triton==2.1.0 - typing-extensions==4.8.0 - urllib3==2.0.6 diff --git a/backend/python/transformers-musicgen/Makefile b/backend/python/transformers-musicgen/Makefile index 4a02d7726062..2191c481abb8 100644 --- a/backend/python/transformers-musicgen/Makefile +++ b/backend/python/transformers-musicgen/Makefile @@ -17,7 +17,6 @@ run: bash run.sh @echo "transformers run." -# It is not working well by using command line. It only6 works with IDE like VSCode. .PHONY: test test: @echo "Testing transformers..." diff --git a/backend/python/vall-e-x/Makefile b/backend/python/vall-e-x/Makefile index 0aa19205a147..fdb0ab7c900f 100644 --- a/backend/python/vall-e-x/Makefile +++ b/backend/python/vall-e-x/Makefile @@ -10,3 +10,9 @@ run: @echo "Running ttsvalle..." bash run.sh @echo "ttsvalle run." + +.PHONY: test +test: + @echo "Testing valle..." + bash test.sh + @echo "valle tested." 
diff --git a/backend/python/vall-e-x/test.py b/backend/python/vall-e-x/test.py new file mode 100644 index 000000000000..9acc7ec649e1 --- /dev/null +++ b/backend/python/vall-e-x/test.py @@ -0,0 +1,81 @@ +""" +A test script to test the gRPC service +""" +import unittest +import subprocess +import time +import backend_pb2 +import backend_pb2_grpc + +import grpc + + +class TestBackendServicer(unittest.TestCase): + """ + TestBackendServicer is the class that tests the gRPC service + """ + def setUp(self): + """ + This method sets up the gRPC service by starting the server + """ + self.service = subprocess.Popen(["python3", "ttsvalle.py", "--addr", "localhost:50051"]) + time.sleep(10) + + def tearDown(self) -> None: + """ + This method tears down the gRPC service by terminating the server + """ + self.service.terminate() + self.service.wait() + + def test_server_startup(self): + """ + This method tests if the server starts up successfully + """ + try: + self.setUp() + with grpc.insecure_channel("localhost:50051") as channel: + stub = backend_pb2_grpc.BackendStub(channel) + response = stub.Health(backend_pb2.HealthMessage()) + self.assertEqual(response.message, b'OK') + except Exception as err: + print(err) + self.fail("Server failed to start") + finally: + self.tearDown() + + def test_load_model(self): + """ + This method tests if the model is loaded successfully + """ + try: + self.setUp() + with grpc.insecure_channel("localhost:50051") as channel: + stub = backend_pb2_grpc.BackendStub(channel) + response = stub.LoadModel(backend_pb2.ModelOptions(Model="dingzhen")) + self.assertTrue(response.success) + self.assertEqual(response.message, "Model loaded successfully") + except Exception as err: + print(err) + self.fail("LoadModel service failed") + finally: + self.tearDown() + + def test_tts(self): + """ + This method tests if the embeddings are generated successfully + """ + try: + self.setUp() + with grpc.insecure_channel("localhost:50051") as channel: + stub = 
backend_pb2_grpc.BackendStub(channel) + response = stub.LoadModel(backend_pb2.ModelOptions(Model="dingzhen")) + self.assertTrue(response.success) + tts_request = backend_pb2.TTSRequest(text="80s TV news production music hit for tonight's biggest story") + tts_response = stub.TTS(tts_request) + self.assertIsNotNone(tts_response) + except Exception as err: + print(err) + self.fail("TTS service failed") + finally: + self.tearDown() \ No newline at end of file diff --git a/backend/python/vall-e-x/test.sh b/backend/python/vall-e-x/test.sh new file mode 100644 index 000000000000..df897a0b87e2 --- /dev/null +++ b/backend/python/vall-e-x/test.sh @@ -0,0 +1,11 @@ +#!/bin/bash +## +## A bash script wrapper that runs the ttsvalle server with conda + +# Activate conda environment +source activate ttsvalle + +# get the directory where the bash script is located +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +python -m unittest $DIR/test.py \ No newline at end of file diff --git a/backend/python/vllm/Makefile b/backend/python/vllm/Makefile index b8a07596a93c..4131af41f151 100644 --- a/backend/python/vllm/Makefile +++ b/backend/python/vllm/Makefile @@ -9,3 +9,9 @@ run: @echo "Running vllm..." bash run.sh @echo "vllm run." + +.PHONY: test +test: + @echo "Testing vllm..." + bash test.sh + @echo "vllm tested." 
\ No newline at end of file diff --git a/backend/python/vllm/test.sh b/backend/python/vllm/test.sh new file mode 100644 index 000000000000..70a502eb0aba --- /dev/null +++ b/backend/python/vllm/test.sh @@ -0,0 +1,11 @@ +#!/bin/bash +## +## A bash script wrapper that runs the vllm server tests with conda + +# Activate conda environment +source activate vllm + +# get the directory where the bash script is located +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +python -m unittest $DIR/test_backend_vllm.py \ No newline at end of file diff --git a/backend/python/vllm/test_backend_vllm.py b/backend/python/vllm/test_backend_vllm.py index c1e6f28a0ddb..06317c738d85 100644 --- a/backend/python/vllm/test_backend_vllm.py +++ b/backend/python/vllm/test_backend_vllm.py @@ -21,13 +21,13 @@ class TestBackendServicer(unittest.TestCase): """ def setUp(self): self.service = subprocess.Popen(["python", "backend_vllm.py", "--addr", "localhost:50051"]) + time.sleep(10) def tearDown(self) -> None: self.service.terminate() self.service.wait() def test_server_startup(self): - time.sleep(2) try: self.setUp() with grpc.insecure_channel("localhost:50051") as channel: @@ -39,3 +39,38 @@ def test_server_startup(self): self.fail("Server failed to start") finally: self.tearDown() + def test_load_model(self): + """ + This method tests if the model is loaded successfully + """ + try: + self.setUp() + with grpc.insecure_channel("localhost:50051") as channel: + stub = backend_pb2_grpc.BackendStub(channel) + response = stub.LoadModel(backend_pb2.ModelOptions(Model="facebook/opt-125m")) + self.assertTrue(response.success) + self.assertEqual(response.message, "Model loaded successfully") + except Exception as err: + print(err) + self.fail("LoadModel service failed") + finally: + self.tearDown() + + def test_text(self): + """ + This method tests if text is generated successfully + """ + try: + self.setUp() + with grpc.insecure_channel("localhost:50051") as 
channel: + stub = backend_pb2_grpc.BackendStub(channel) + response = stub.LoadModel(backend_pb2.ModelOptions(Model="facebook/opt-125m")) + self.assertTrue(response.success) + req = backend_pb2.PredictOptions(Prompt="The capital of France is") + resp = stub.Predict(req) + self.assertIsNotNone(resp.message) + except Exception as err: + print(err) + self.fail("text service failed") + finally: + self.tearDown() \ No newline at end of file