diff --git a/.github/workflows/test-gpu.yml b/.github/workflows/test-gpu.yml
index c6f19a92a012..ee08b9332e4a 100644
--- a/.github/workflows/test-gpu.yml
+++ b/.github/workflows/test-gpu.yml
@@ -78,6 +78,9 @@ jobs:
           sudo rm -rf build || true
           sudo rm -rf bin || true
           sudo rm -rf dist || true
+          sudo docker logs $(sudo docker ps -q --filter ancestor=localai-tests) > logs.txt
+          sudo cat logs.txt || true
+          sudo rm -rf logs.txt
           make clean || true
           make teardown-e2e || true
           docker system prune -f -a --volumes || true
\ No newline at end of file
diff --git a/Makefile b/Makefile
index b088c28b5159..924aebd8c6a5 100644
--- a/Makefile
+++ b/Makefile
@@ -340,7 +340,7 @@ run-e2e-image:
 
 test-e2e:
 	@echo 'Running e2e tests'
-	LOCALAI_API=http://$(E2E_BRIDGE_IP):5390 $(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --flake-attempts 5 -v -r ./tests/e2e
+	LOCALAI_API=http://$(E2E_BRIDGE_IP):5390/v1 $(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --flake-attempts 5 -v -r ./tests/e2e
 
 teardown-e2e:
 	rm -rf ./tests/e2e-fixtures/ggllm-test-model.bin
diff --git a/tests/e2e/e2e_test.go b/tests/e2e/e2e_test.go
index fe3bd84c6466..ecf8ec2b4136 100644
--- a/tests/e2e/e2e_test.go
+++ b/tests/e2e/e2e_test.go
@@ -34,7 +34,7 @@ var _ = Describe("E2E test", func() {
 	// Check that the GPU was used
 	AfterEach(func() {
 		// Execute docker logs $$(docker ps -q --filter ancestor=localai-tests) as a command and check the output
-		cmd := exec.Command("/bin/bash", "-xce", "docker logs $$(docker ps -q --filter ancestor=localai-tests)")
+		cmd := exec.Command("/bin/bash", "-xce", "docker logs $(docker ps -q --filter ancestor=localai-tests)")
 		out, err := cmd.CombinedOutput()
 		Expect(err).ToNot(HaveOccurred())
 		Expect(string(out)).To(ContainSubstring("found 1 CUDA devices"))
@@ -45,7 +45,7 @@ var _ = Describe("E2E test", func() {
 	It("streams chat tokens", func() {
 		models, err := client.ListModels(context.TODO())
 		Expect(err).ToNot(HaveOccurred())
-		Expect(models.Models).ToNot(BeEmpty())
+		Expect(models.Models).ToNot(BeEmpty(), models.Models)
 		model := models.Models[0].ID
 		resp, err := client.CreateChatCompletion(context.TODO(), openai.ChatCompletionRequest{