feat: bump vllm version and add local port accessing to inference services #993

Workflow file for this run

name: CI
"on":
  workflow_dispatch:
  pull_request:
    types:
      - opened
      - reopened
      - synchronize
  merge_group:
env:
  toolchain: nightly-2024-11-14
  CARGO_HTTP_MULTIPLEXING: false
  CARGO_TERM_COLOR: always
  CARGO_UNSTABLE_SPARSE_REGISTRY: true
  CARGO_INCREMENTAL: 0
  TERM: unknown
# keep only the newest run per ref; cancel superseded in-progress runs
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
jobs:
  clippy:
    name: clippy
    runs-on: [ubuntu-22.04]
    services:
      # Postgres service container, reachable from the job at localhost:5432
      postgres:
        image: postgres:13
        env:
          POSTGRES_DB: atoma
          POSTGRES_USER: atoma
          POSTGRES_PASSWORD: atoma
        ports:
          - 5432:5432
        # health check to ensure the database is ready before the steps run
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
    steps:
      - name: checkout
        uses: actions/checkout@v4
      - name: toolchain
        uses: dtolnay/rust-toolchain@master
        with:
          toolchain: ${{ env.toolchain }}
          components: clippy, rustfmt
      - name: cargo format
        run: cargo fmt --all -- --check
      - name: Install cargo-lints
        run: cargo install cargo-lints
      - name: Clippy check (with lints)
        run: cargo lints clippy --all-targets
      - name: Unit tests checks
        run: cargo test
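
For debugging a failed run, the same checks can be reproduced locally. This is a minimal sketch, assuming Docker and rustup are installed; the container name is arbitrary, and whether the test suite actually picks up these Postgres credentials is an assumption about the crate rather than something stated in the workflow file:

# Start the same Postgres 13 service the job uses (container name is arbitrary)
docker run -d --name atoma-ci-pg \
  -e POSTGRES_DB=atoma -e POSTGRES_USER=atoma -e POSTGRES_PASSWORD=atoma \
  -p 5432:5432 \
  --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5 \
  postgres:13

# Match the pinned CI toolchain and components
rustup toolchain install nightly-2024-11-14 --component clippy --component rustfmt

# Mirror the steps of the clippy job
cargo +nightly-2024-11-14 fmt --all -- --check
cargo install cargo-lints
cargo +nightly-2024-11-14 lints clippy --all-targets
cargo +nightly-2024-11-14 test

# Clean up the service container afterwards
docker rm -f atoma-ci-pg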