diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index 6b12d19ba611f..6a2932db9f2dc 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -48,6 +48,7 @@ steps: - TEST_DIST_MODEL=meta-llama/Llama-2-7b-hf DISTRIBUTED_EXECUTOR_BACKEND=mp pytest -v -s distributed/test_chunked_prefill_distributed.py - pytest -v -s spec_decode/e2e/test_integration_dist.py - CUDA_VISIBLE_DEVICES=0,1 pytest -v -s test_sharded_state_loader.py + - CUDA_VISIBLE_DEVICES=0,1 pytest -v -s distributed/test_utils.py - label: Distributed Tests (Multiple Groups) #mirror_hardwares: [amd] diff --git a/.buildkite/test-template-aws.j2 b/.buildkite/test-template-aws.j2 index 3b5d36b246673..09649b625c319 100644 --- a/.buildkite/test-template-aws.j2 +++ b/.buildkite/test-template-aws.j2 @@ -7,7 +7,7 @@ steps: queue: cpu_queue commands: - "aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/q9t5s3a7" - - "docker build --build-arg max_jobs=16 --tag {{ docker_image }} --target test --progress plain ." + - "docker build --build-arg max_jobs=16 --build-arg USE_SCCACHE=1 --tag {{ docker_image }} --target test --progress plain ." - "docker push {{ docker_image }}" env: DOCKER_BUILDKIT: "1" @@ -19,6 +19,34 @@ steps: limit: 5 - wait + - group: "AMD Tests" + depends_on: ~ + steps: + {% for step in steps %} + {% if step.mirror_hardwares and "amd" in step.mirror_hardwares %} + - label: "AMD: {{ step.label }}" + agents: + queue: amd + command: bash .buildkite/run-amd-test.sh "cd {{ (step.working_dir or default_working_dir) | safe }} ; {{ step.command or (step.commands | join(" ; ")) | safe }}" + env: + DOCKER_BUILDKIT: "1" + soft_fail: true + {% endif %} + {% endfor %} + + - label: "Neuron Test" + depends_on: ~ + agents: + queue: neuron + command: bash .buildkite/run-neuron-test.sh + soft_fail: false + + - label: "Intel Test" + depends_on: ~ + agents: + queue: intel + command: bash .buildkite/run-cpu-test.sh + {% for step in steps %} - label: "{{ step.label }}" agents: @@ -31,7 +59,7 @@ steps: {% else %} queue: gpu_1_queue {% endif %} - soft_fail: true + soft_fail: {{ step.soft_fail or false }} {% if step.parallelism %} parallelism: {{ step.parallelism }} {% endif %} diff --git a/.github/actions/nm-set-env/action.yml b/.github/actions/nm-set-env/action.yml index fbd80377e687f..dd8ef9e99d61c 100644 --- a/.github/actions/nm-set-env/action.yml +++ b/.github/actions/nm-set-env/action.yml @@ -39,6 +39,9 @@ runs: # testmo echo "XDG_CONFIG_HOME=/usr/local/apps" >> $GITHUB_ENV echo "PROJECT_ID=12" >> $GITHUB_ENV + # disable usage stats (writes to protected /usr/local/apps) + echo "VLLM_NO_USAGE_STATS=1" >> $GITHUB_ENV + echo "DO_NOT_TRACK=1" >> $GITHUB_ENV env: HF_TOKEN_SECRET: ${{ inputs.hf_token }} shell: bash diff --git a/.github/scripts/run-tests b/.github/scripts/run-tests index e64ea401b16ce..bea7fffd7a93a 100755 --- a/.github/scripts/run-tests +++ b/.github/scripts/run-tests @@ -109,6 +109,10 @@ do LOCAL_SUCCESS=0 RESULT_XML=$(echo ${TEST} | sed -e "s/${TEST_DIR}/${RESULTS_DIR}/" | sed -e "s/.py/.xml/") + # report which test is being run + # (in CI, if a test hangs, this logs *which* test is running *before* it hangs) + echo "=== RUNNING TEST: ${TEST} ===" + # this is a bit messy and brittle, but certain tests # need to be run with specific options if [[ "${TEST}" == *"kernels"* || "${TEST}" == *"samplers"* ]]; then @@ -125,7 +129,18 @@ do pytest ${CC_PYTEST_FLAGS} --junitxml=${RESULT_XML} ${TEST} || LOCAL_SUCCESS=$? 
fi - SUCCESS=$((SUCCESS + LOCAL_SUCCESS)) + # if a file gets exit code 0, we are good + if [[ $LOCAL_SUCCESS == 0 ]]; then + echo "=== PASSED TEST: ${TEST} ===" + # if a file does not run any tests, pytest reports exit code of 5 + # since we skip full modules in our skipping strategy, this is common + elif [[ $LOCAL_SUCCESS == 5 ]]; then + echo "=== SKIPPED TEST: ${TEST} ===" + # otherwise, report failure + else + echo "=== FAILED TEST: ${TEST} ===" + SUCCESS=$((SUCCESS + LOCAL_SUCCESS)) + fi done diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml index e71033f828006..773def58fd966 100644 --- a/.github/workflows/ruff.yml +++ b/.github/workflows/ruff.yml @@ -25,7 +25,7 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - pip install ruff==0.1.5 codespell==2.2.6 tomli==2.0.1 isort==5.13.2 + pip install ruff==0.1.5 codespell==2.3.0 tomli==2.0.1 isort==5.13.2 - name: Analysing the code with ruff run: | ruff . diff --git a/CMakeLists.txt b/CMakeLists.txt index fab73ebc4a101..f3287651fbd67 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -180,9 +180,9 @@ if(VLLM_GPU_LANG STREQUAL "CUDA") "csrc/quantization/gptq_marlin/gptq_marlin.cu" "csrc/quantization/gptq_marlin/gptq_marlin_repack.cu" "csrc/custom_all_reduce.cu" - "csrc/quantization/cutlass_w8a8/scaled_mm_dq_entry.cu" - "csrc/quantization/cutlass_w8a8/scaled_mm_dq_c2x.cu" - "csrc/quantization/cutlass_w8a8/scaled_mm_dq_c3x.cu") + "csrc/quantization/cutlass_w8a8/scaled_mm_entry.cu" + "csrc/quantization/cutlass_w8a8/scaled_mm_c2x.cu" + "csrc/quantization/cutlass_w8a8/scaled_mm_c3x.cu") # # The CUTLASS kernels for Hopper require sm90a to be enabled. @@ -190,7 +190,7 @@ if(VLLM_GPU_LANG STREQUAL "CUDA") # That adds an extra 17MB to compiled binary, so instead we selectively enable it. 
if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER 12.0) set_source_files_properties( - "csrc/quantization/cutlass_w8a8/scaled_mm_dq_c3x.cu" + "csrc/quantization/cutlass_w8a8/scaled_mm_c3x.cu" PROPERTIES COMPILE_FLAGS "-gencode arch=compute_90a,code=sm_90a") diff --git a/Dockerfile b/Dockerfile index 9f741b0ac7e53..7445e815f424b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,8 +9,8 @@ # prepare basic build environment FROM nvidia/cuda:12.4.1-devel-ubuntu22.04 AS dev -RUN apt-get update -y && \ - apt-get install -y python3-pip git +RUN apt-get update -y \ + && apt-get install -y python3-pip git curl sudo # Workaround for https://github.com/openai/triton/issues/2507 and # https://github.com/pytorch/pytorch/issues/107960 -- hopefully @@ -27,6 +27,8 @@ RUN --mount=type=cache,target=/root/.cache/pip \ pip install -r requirements-cuda.txt # install development dependencies +COPY requirements-lint.txt requirements-lint.txt +COPY requirements-test.txt requirements-test.txt COPY requirements-dev.txt requirements-dev.txt RUN --mount=type=cache,target=/root/.cache/pip \ pip install -r requirements-dev.txt diff --git a/Dockerfile.cpu b/Dockerfile.cpu index 403a1cd0391b0..777bb08296ed9 100644 --- a/Dockerfile.cpu +++ b/Dockerfile.cpu @@ -3,9 +3,13 @@ FROM ubuntu:22.04 AS cpu-test-1 RUN apt-get update -y \ - && apt-get install -y git wget vim numactl gcc-12 g++-12 python3 python3-pip \ + && apt-get install -y git wget vim numactl gcc-12 g++-12 python3 python3-pip libtcmalloc-minimal4 \ && update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 10 --slave /usr/bin/g++ g++ /usr/bin/g++-12 +RUN echo 'export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libtcmalloc_minimal.so.4:$LD_PRELOAD' >> ~/.bashrc + +RUN pip install https://intel-extension-for-pytorch.s3.amazonaws.com/ipex_dev/cpu/intel_extension_for_pytorch-2.3.100%2Bgit0eb3473-cp310-cp310-linux_x86_64.whl + RUN pip install --upgrade pip \ && pip install wheel packaging ninja "setuptools>=49.4.0" numpy @@ -21,6 +25,6 @@ RUN VLLM_TARGET_DEVICE=cpu python3 setup.py install WORKDIR /workspace/ -RUN ln -s /workspace/vllm/tests && ln -s /workspace/vllm/examples && ln -s /workspace/vllm/benchmarks +RUN ln -s /workspace/vllm/tests && ln -s /workspace/vllm/examples && ln -s /workspace/vllm/benchmarks CMD ["/bin/bash"] diff --git a/Dockerfile.tpu b/Dockerfile.tpu new file mode 100644 index 0000000000000..931c844c08dce --- /dev/null +++ b/Dockerfile.tpu @@ -0,0 +1,19 @@ +ARG NIGHTLY_DATE="20240601" +ARG BASE_IMAGE="us-central1-docker.pkg.dev/tpu-pytorch-releases/docker/xla:nightly_3.10_tpuvm_$NIGHTLY_DATE" + +FROM $BASE_IMAGE + +WORKDIR /workspace +COPY . /workspace/vllm + +ENV VLLM_TARGET_DEVICE="tpu" +# Install aiohttp separately to avoid build errors. +RUN pip install aiohttp +# Install the TPU and Pallas dependencies. +RUN pip install torch_xla[tpu] -f https://storage.googleapis.com/libtpu-releases/index.html +RUN pip install torch_xla[pallas] -f https://storage.googleapis.com/jax-releases/jax_nightly_releases.html -f https://storage.googleapis.com/jax-releases/jaxlib_nightly_releases.html + +# Build vLLM. 
+RUN cd /workspace/vllm && python setup.py develop + +CMD ["/bin/bash"] diff --git a/benchmarks/backend_request_func.py b/benchmarks/backend_request_func.py index 58dcc6167efa6..52386b8cd62b3 100644 --- a/benchmarks/backend_request_func.py +++ b/benchmarks/backend_request_func.py @@ -68,9 +68,13 @@ async def async_request_tgi( chunk_bytes = chunk_bytes.strip() if not chunk_bytes: continue + chunk_bytes = chunk_bytes.decode("utf-8") - chunk = remove_prefix(chunk_bytes.decode("utf-8"), - "data:") + #NOTE: Sometimes TGI returns a ping response without + # any data, we should skip it. + if chunk_bytes.startswith(":"): + continue + chunk = remove_prefix(chunk_bytes, "data:") data = json.loads(chunk) timestamp = time.perf_counter() diff --git a/benchmarks/benchmark_latency.py b/benchmarks/benchmark_latency.py index 1a41b66b38824..17edb7515964a 100644 --- a/benchmarks/benchmark_latency.py +++ b/benchmarks/benchmark_latency.py @@ -189,7 +189,7 @@ def run_to_completion(profile_dir: Optional[str] = None): "--device", type=str, default="cuda", - choices=["cuda", "cpu"], + choices=["cuda", "cpu", "tpu"], help='device type for vLLM execution, supporting CUDA and CPU.') parser.add_argument('--block-size', type=int, diff --git a/benchmarks/benchmark_throughput.py b/benchmarks/benchmark_throughput.py index 90f7433e0ae28..07b2f85410e3c 100644 --- a/benchmarks/benchmark_throughput.py +++ b/benchmarks/benchmark_throughput.py @@ -346,7 +346,7 @@ def main(args: argparse.Namespace): "--device", type=str, default="cuda", - choices=["cuda", "cpu"], + choices=["cuda", "cpu", "tpu"], help='device type for vLLM execution, supporting CUDA and CPU.') parser.add_argument( "--enable-prefix-caching", diff --git a/benchmarks/cutlass_benchmarks/w8a8_benchmarks.py b/benchmarks/cutlass_benchmarks/w8a8_benchmarks.py index 6de56f618700d..182105f0b33f2 100644 --- a/benchmarks/cutlass_benchmarks/w8a8_benchmarks.py +++ b/benchmarks/cutlass_benchmarks/w8a8_benchmarks.py @@ -76,11 +76,7 @@ def pytorch_fp8_impl_fast_accum(a: torch.tensor, b: torch.tensor, def cutlass_impl(a: torch.tensor, b: torch.tensor, scale_a: torch.tensor, scale_b: torch.tensor, out_dtype: torch.dtype) -> torch.tensor: - return ops.cutlass_scaled_mm_dq(a, - b, - scale_a, - scale_b, - out_dtype=out_dtype) + return ops.cutlass_scaled_mm(a, b, scale_a, scale_b, out_dtype=out_dtype) # bench diff --git a/cmake/cpu_extension.cmake b/cmake/cpu_extension.cmake index 61d4843838ba0..a644e5b6a8b21 100644 --- a/cmake/cpu_extension.cmake +++ b/cmake/cpu_extension.cmake @@ -33,6 +33,7 @@ function (find_isa CPUINFO TARGET OUT) endif() endfunction() +find_isa(${CPUINFO} "avx2" AVX2_FOUND) find_isa(${CPUINFO} "avx512f" AVX512_FOUND) if (AVX512_FOUND) @@ -53,8 +54,11 @@ if (AVX512_FOUND) else() message(WARNING "Disable AVX512-BF16 ISA support, no avx512_bf16 found in local CPU flags." 
" If cross-compilation is required, please set env VLLM_CPU_AVX512BF16=1.") endif() +elseif (AVX2_FOUND) + list(APPEND CXX_COMPILE_FLAGS "-mavx2") + message(WARNING "vLLM CPU backend using AVX2 ISA") else() - message(FATAL_ERROR "vLLM CPU backend requires AVX512 ISA support.") + message(FATAL_ERROR "vLLM CPU backend requires AVX512 or AVX2 ISA support.") endif() message(STATUS "CPU extension compile flags: ${CXX_COMPILE_FLAGS}") diff --git a/csrc/cpu/cpu_types.hpp b/csrc/cpu/cpu_types.hpp index 034c406a532d5..d7621aaae81c9 100644 --- a/csrc/cpu/cpu_types.hpp +++ b/csrc/cpu/cpu_types.hpp @@ -5,6 +5,10 @@ #include #include +#ifndef __AVX2__ +static_assert(false, "AVX2 must be supported for the current implementation."); +#endif + namespace vec_op { // FIXME: FP16 is not fully supported in Torch-CPU @@ -104,6 +108,7 @@ struct BF16Vec16 : public Vec { void save(void *ptr) const { *reinterpret_cast<__m256i *>(ptr) = reg; } }; +#ifdef __AVX512F__ struct BF16Vec32 : public Vec { constexpr static int VEC_ELEM_NUM = 32; @@ -123,6 +128,34 @@ struct BF16Vec32 : public Vec { void save(void *ptr) const { *reinterpret_cast<__m512i *>(ptr) = reg; } }; +#else +struct BF16Vec32 : public Vec { + constexpr static int VEC_ELEM_NUM = 32; + + __m256i reg_low; + __m256i reg_high; + + explicit BF16Vec32(const void *ptr) + : reg_low(_mm256_loadu_si256((__m256i const *)ptr)), + reg_high(_mm256_loadu_si256((__m256i const *)ptr + 1)) {} + + explicit BF16Vec32(__m256i low, __m256i high) : reg_low(low), + reg_high(high) {} + + explicit BF16Vec32(BF16Vec8 &vec8_data) + : reg_low((__m256i)_mm256_inserti32x4( + _mm256_castsi128_si256((__m128i)vec8_data.reg), + (__m128i)vec8_data.reg, 1)), + reg_high((__m256i)_mm256_inserti32x4( + _mm256_castsi128_si256((__m128i)vec8_data.reg), + (__m128i)vec8_data.reg, 1)) {} + + void save(void *ptr) const { + *reinterpret_cast<__m256i *>(ptr) = reg_low; + *reinterpret_cast<__m256i *>((__m256i *)ptr + 1) = reg_high; + } +}; +#endif struct FP32Vec4 : public Vec { constexpr static int VEC_ELEM_NUM = 4; @@ -226,6 +259,7 @@ struct FP32Vec8 : public Vec { void save(float *ptr) const { _mm256_storeu_ps(ptr, reg); } }; +#ifdef __AVX512F__ struct FP32Vec16 : public Vec { constexpr static int VEC_ELEM_NUM = 16; union AliasReg { @@ -290,6 +324,114 @@ struct FP32Vec16 : public Vec { void save(float *ptr) const { _mm512_storeu_ps(ptr, reg); } }; +#else +struct FP32Vec16 : public Vec { + constexpr static int VEC_ELEM_NUM = 16; + + union AliasReg { + __m256 reg; + float values[8]; + }; + + __m256 reg_low; + __m256 reg_high; + + explicit FP32Vec16(float v) : reg_low(_mm256_set1_ps(v)), + reg_high(_mm256_set1_ps(v)) {} + + explicit FP32Vec16() : reg_low(_mm256_set1_ps(0.0)), + reg_high(_mm256_set1_ps(0.0)) {} + + explicit FP32Vec16(const float *ptr) : reg_low(_mm256_loadu_ps(ptr)), + reg_high(_mm256_loadu_ps(ptr + 8)) {} + + explicit FP32Vec16(__m256 low, __m256 high) : reg_low(low), reg_high(high) {} + + explicit FP32Vec16(const FP32Vec16 &data) : reg_low(data.reg_low), + reg_high(data.reg_high) {} + + explicit FP32Vec16(const FP32Vec4 &data) + : reg_low((__m256)_mm256_inserti128_si256( + _mm256_castsi128_si256((__m128i)data.reg), + (__m128i)data.reg, 1)), + reg_high((__m256)_mm256_inserti128_si256( + _mm256_castsi128_si256((__m128i)data.reg), + (__m128i)data.reg, 1)) {} + + explicit FP32Vec16(const FP32Vec8 &data) + : reg_low(data.reg), reg_high(data.reg) {} + + explicit FP32Vec16(const BF16Vec16 &v) { + __m128i low = _mm256_extractf128_si256(v.reg, 0); + __m128i high = _mm256_extractf128_si256(v.reg, 
1); + + __m256i v_low_epi32 = _mm256_cvtepu16_epi32(low); + __m256i v_high_epi32 = _mm256_cvtepu16_epi32(high); + + __m256i v_low_shifted = _mm256_bslli_epi128(v_low_epi32, 2); + __m256i v_high_shifted = _mm256_bslli_epi128(v_high_epi32, 2); + + reg_low = _mm256_castsi256_ps(v_low_shifted); + reg_high = _mm256_castsi256_ps(v_high_shifted); + } + + explicit FP32Vec16(const BF16Vec8 &v) : FP32Vec16(FP32Vec8(v)) {} + + FP32Vec16 operator*(const FP32Vec16 &b) const { + return FP32Vec16(_mm256_mul_ps(reg_low, b.reg_low), + _mm256_mul_ps(reg_high, b.reg_high)); + } + + FP32Vec16 operator+(const FP32Vec16 &b) const { + return FP32Vec16(_mm256_add_ps(reg_low, b.reg_low), + _mm256_add_ps(reg_high, b.reg_high)); + } + + FP32Vec16 operator-(const FP32Vec16 &b) const { + return FP32Vec16(_mm256_sub_ps(reg_low, b.reg_low), + _mm256_sub_ps(reg_high, b.reg_high)); + } + + FP32Vec16 operator/(const FP32Vec16 &b) const { + return FP32Vec16(_mm256_div_ps(reg_low, b.reg_low), + _mm256_div_ps(reg_high, b.reg_high)); + } + + float reduce_sum() const { + FP32Vec8 low = FP32Vec8(reg_low); + FP32Vec8 high = FP32Vec8(reg_high); + return low.reduce_sum() + high.reduce_sum(); + } + + template float reduce_sub_sum(int idx) { + float sum = 0.0; + static_assert(VEC_ELEM_NUM % group_size == 0); + constexpr uint32_t base_mask = (0xFFFF >> (16 - group_size)); + uint32_t mask = base_mask << (idx * group_size); + + AliasReg ar; + + auto func = [&sum, &mask, &ar](int i) { + int flag = mask & 0x1; + mask = mask >> 1; + if (flag != 0) sum += ar.values[i]; + }; + + ar.reg = reg_low; + unroll_loop(func); + + ar.reg = reg_high; + unroll_loop(func); + + return sum; + } + + void save(float *ptr) const { + _mm256_storeu_ps(ptr, reg_low); + _mm256_storeu_ps(ptr + 8, reg_high); + } +}; +#endif template struct VecType { using vec_type = void; }; @@ -336,6 +478,7 @@ template <> inline void storeFP32(float v, c10::BFloat16 *ptr) { *ptr = *(v_ptr + 1); } +#ifdef __AVX512F__ inline BF16Vec8::BF16Vec8(const FP32Vec8 &v) : reg(_mm256_cvtepi32_epi16( _mm256_bsrli_epi128(_mm256_castps_si256(v.reg), 2))) {} @@ -343,7 +486,27 @@ inline BF16Vec8::BF16Vec8(const FP32Vec8 &v) inline BF16Vec16::BF16Vec16(const FP32Vec16 &v) : reg(_mm512_cvtepi32_epi16( _mm512_bsrli_epi128(_mm512_castps_si512(v.reg), 2))) {} -#endif +#else +namespace{ +__m128i FP32Vec8_to_BF16Vec8_avx2(__m256 a) { + __m256i ai = _mm256_castps_si256(a); + ai = _mm256_srli_epi32(ai, 16); + ai = _mm256_packus_epi32(ai, ai); + ai = _mm256_permute4x64_epi64(ai, 0b00111001); + return _mm256_extracti128_si256(ai, 0); +} +} + +inline BF16Vec8::BF16Vec8(const FP32Vec8 &v) + : reg(FP32Vec8_to_BF16Vec8_avx2(v.reg)) {} + +inline BF16Vec16::BF16Vec16(const FP32Vec16 &v) { + BF16Vec8 low = BF16Vec8(FP32Vec8(v.reg_low)); + BF16Vec8 high = BF16Vec8(FP32Vec8(v.reg_high)); + reg = _mm256_insertf128_si256(_mm256_castsi128_si256(low.reg), high.reg, 1); +} +#endif // __AVX512F__ +#endif // __AVX512BF16__ inline void prefetch(const void *addr) { _mm_prefetch(addr, _MM_HINT_T1); } diff --git a/csrc/ops.h b/csrc/ops.h index 0c270a78c331f..9e2e977fa3c2e 100644 --- a/csrc/ops.h +++ b/csrc/ops.h @@ -90,9 +90,9 @@ torch::Tensor gptq_marlin_repack(torch::Tensor& b_q_weight, torch::Tensor& perm, int64_t size_k, int64_t size_n, int64_t num_bits); -void cutlass_scaled_mm_dq(torch::Tensor& out, torch::Tensor const& a, - torch::Tensor const& b, torch::Tensor const& a_scales, - torch::Tensor const& b_scales); +void cutlass_scaled_mm(torch::Tensor& out, torch::Tensor const& a, + torch::Tensor const& b, torch::Tensor 
const& a_scales, + torch::Tensor const& b_scales); #endif diff --git a/csrc/quantization/cutlass_w8a8/scaled_mm_dq_c2x.cu b/csrc/quantization/cutlass_w8a8/scaled_mm_c2x.cu similarity index 71% rename from csrc/quantization/cutlass_w8a8/scaled_mm_dq_c2x.cu rename to csrc/quantization/cutlass_w8a8/scaled_mm_c2x.cu index 23a8b4070b70e..7651268dc5316 100644 --- a/csrc/quantization/cutlass_w8a8/scaled_mm_dq_c2x.cu +++ b/csrc/quantization/cutlass_w8a8/scaled_mm_c2x.cu @@ -29,21 +29,14 @@ using namespace cute; /* - This defines a quantized GEMM operation with dequantized output, similar to - torch._scaled_mm. It is defined using the CUTLASS 2.x API, and is used for + This file defines quantized GEMM operations using the CUTLASS 2.x API, for NVIDIA GPUs with SM versions prior to sm90 (Hopper). - A and B may be both either int8 or fp8_e4m3. A can be quantized per-tensor or - per-row. B can be quantized per-tensor or per-column. - Any combination of per-tensor and per-row or column is supported. - A and B must have symmetric quantization (zero point == 0). - - So the GEMM operation is D = (a_scales * A) (b_scales * B), where the - scales are applied elementwise with numpy-style broadcasting. - - ScaleA and ScaleB define the epilogue functions that apply the scales for - the A and B operands respectively. These scales may be either per-tensor or - per row or column. + Epilogue functions can be defined to post-process the output before it is + written to GPU memory. + Epilogues must contain a public type named EVTCompute of type Sm80EVT, + as well as a static prepare_args function that constructs an + EVTCompute::Arguments struct. */ namespace { @@ -83,27 +76,25 @@ struct enable_sm89_to_sm90 : Kernel { } }; -template typename ArchGuard, - typename ElementAB_, typename ElementD_, typename TileShape, - typename WarpShape, typename InstructionShape, int32_t MainLoopStages> -struct cutlass_2x_gemm { - using ElementAB = ElementAB_; - using ElementD = ElementD_; - - using ElementAcc = - typename std::conditional, int32_t, - float>::type; +/* + This epilogue function defines a quantized GEMM operation similar to + torch._scaled_mm. - using Operator = - typename std::conditional, - cutlass::arch::OpMultiplyAddSaturate, - cutlass::arch::OpMultiplyAdd>::type; + A and B may be both either int8 or fp8_e4m3. A can be quantized per-tensor or + per-row. B can be quantized per-tensor or per-column. + Any combination of per-tensor and per-row or column is supported. + A and B must have symmetric quantization (zero point == 0). - using OutputTileThreadMap = - cutlass::epilogue::threadblock::OutputTileThreadLayout< - TileShape, WarpShape, float, 4, 1 /* epilogue stages */ - >; + So the GEMM operation is D = (a_scales * A) (b_scales * B), where the + scales are applied elementwise with numpy-style broadcasting. + ScaleA and ScaleB define the epilogue functions that apply the scales for + the A and B operands respectively. These scales may be either per-tensor or + per row or column. 
+*/ +template +struct ScaledEpilogue { + private: using Accum = cutlass::epilogue::threadblock::VisitorAccFetch; using ScaleA = cutlass::epilogue::threadblock::VisitorColOrScalarBroadcast< @@ -123,14 +114,56 @@ struct cutlass_2x_gemm { cutlass::multiplies, ElementD, float, cutlass::FloatRoundStyle::round_to_nearest>; - using EVTCompute1 = + public: + using EVTCompute = cutlass::epilogue::threadblock::Sm80EVT; + using ArgumentType = typename EVTCompute::Arguments; + + static ArgumentType prepare_args(torch::Tensor const& a_scales, + torch::Tensor const& b_scales) { + using ScaleAArgs = typename ScaleA::Arguments; + using ScaleBArgs = typename ScaleB::Arguments; + + ScaleBArgs b_args{b_scales.data_ptr(), b_scales.numel() != 1, {}}; + ScaleAArgs a_args{a_scales.data_ptr(), a_scales.numel() != 1, {}}; + + typename EVTCompute0::Arguments evt0_compute_args{b_args}; + + typename EVTCompute::Arguments evt_compute_args{a_args, evt0_compute_args}; + return evt_compute_args; + } +}; + +template typename ArchGuard, + typename ElementAB_, typename ElementD_, + template typename Epilogue_, typename TileShape, + typename WarpShape, typename InstructionShape, int32_t MainLoopStages> +struct cutlass_2x_gemm { + using ElementAB = ElementAB_; + using ElementD = ElementD_; + + using ElementAcc = + typename std::conditional, int32_t, + float>::type; + + using Operator = + typename std::conditional, + cutlass::arch::OpMultiplyAddSaturate, + cutlass::arch::OpMultiplyAdd>::type; + + using OutputTileThreadMap = + cutlass::epilogue::threadblock::OutputTileThreadLayout< + TileShape, WarpShape, float, 4, 1 /* epilogue stages */ + >; + + using Epilogue = Epilogue_; + using EVTCompute = typename Epilogue::EVTCompute; using D = cutlass::epilogue::threadblock::VisitorAuxStore< OutputTileThreadMap, ElementD, cutlass::FloatRoundStyle::round_to_nearest, Stride, Int<0>>>; - using EVTD = cutlass::epilogue::threadblock::Sm80EVT; + using EVTD = cutlass::epilogue::threadblock::Sm80EVT; // clang-format off using RowMajor = typename cutlass::layout::RowMajor; @@ -153,11 +186,10 @@ struct cutlass_2x_gemm { using Op = cutlass::gemm::device::GemmUniversalAdapter; }; -template -void cutlass_scaled_mm_dq_dispatcher(torch::Tensor& out, torch::Tensor const& a, - torch::Tensor const& b, - torch::Tensor const& a_scales, - torch::Tensor const& b_scales) { +template +void cutlass_gemm_caller(torch::Tensor& out, torch::Tensor const& a, + torch::Tensor const& b, + EpilogueArgs&&... 
epilogue_params) { using ElementAB = typename Gemm::ElementAB; using ElementD = typename Gemm::ElementD; @@ -177,23 +209,14 @@ void cutlass_scaled_mm_dq_dispatcher(torch::Tensor& out, torch::Tensor const& a, auto b_ptr = static_cast(b.data_ptr()); auto c_ptr = static_cast(out.data_ptr()); - auto a_scales_ptr = a_scales.data_ptr(); - auto b_scales_ptr = b_scales.data_ptr(); - - using ScaleAArgs = typename Gemm::ScaleA::Arguments; - using ScaleBArgs = typename Gemm::ScaleB::Arguments; - - ScaleBArgs b_args{b_scales.data_ptr(), b_scales.numel() != 1, {}}; - ScaleAArgs a_args{a_scales.data_ptr(), a_scales.numel() != 1, {}}; - - typename Gemm::EVTCompute0::Arguments evt0_compute_args{b_args}; - - typename Gemm::EVTCompute1::Arguments evt1_compute_args{a_args, - evt0_compute_args}; typename Gemm::D::Arguments d_args{c_ptr, c_stride}; + using Epilogue = typename Gemm::Epilogue; + auto evt_args = + Epilogue::prepare_args(std::forward(epilogue_params)...); + typename Gemm::EVTD::Arguments epilogue_args{ - evt1_compute_args, + evt_args, d_args, }; @@ -229,10 +252,10 @@ void cutlass_scaled_mm_dq_dispatcher(torch::Tensor& out, torch::Tensor const& a, } // namespace -void cutlass_scaled_mm_dq_sm75(torch::Tensor& out, torch::Tensor const& a, - torch::Tensor const& b, - torch::Tensor const& a_scales, - torch::Tensor const& b_scales) { +void cutlass_scaled_mm_sm75(torch::Tensor& out, torch::Tensor const& a, + torch::Tensor const& b, + torch::Tensor const& a_scales, + torch::Tensor const& b_scales) { TORCH_CHECK(a.dtype() == torch::kInt8); TORCH_CHECK(b.dtype() == torch::kInt8); TORCH_CHECK(a_scales.dtype() == torch::kFloat32); @@ -243,23 +266,23 @@ void cutlass_scaled_mm_dq_sm75(torch::Tensor& out, torch::Tensor const& a, using InstructionShape = typename cutlass::gemm::GemmShape<8, 8, 16>; if (out.dtype() == torch::kBFloat16) { - return cutlass_scaled_mm_dq_dispatcher>(out, a, b, a_scales, - b_scales); + ScaledEpilogue, TileShape, WarpShape, InstructionShape, 2>>( + out, a, b, a_scales, b_scales); } else { TORCH_CHECK(out.dtype() == torch::kFloat16); - return cutlass_scaled_mm_dq_dispatcher>(out, a, b, a_scales, - b_scales); + ScaledEpilogue, TileShape, WarpShape, InstructionShape, 2>>( + out, a, b, a_scales, b_scales); } } -void cutlass_scaled_mm_dq_sm80(torch::Tensor& out, torch::Tensor const& a, - torch::Tensor const& b, - torch::Tensor const& a_scales, - torch::Tensor const& b_scales) { +void cutlass_scaled_mm_sm80(torch::Tensor& out, torch::Tensor const& a, + torch::Tensor const& b, + torch::Tensor const& a_scales, + torch::Tensor const& b_scales) { TORCH_CHECK(a.dtype() == torch::kInt8); TORCH_CHECK(b.dtype() == torch::kInt8); TORCH_CHECK(a_scales.dtype() == torch::kFloat32); @@ -270,23 +293,23 @@ void cutlass_scaled_mm_dq_sm80(torch::Tensor& out, torch::Tensor const& a, using InstructionShape = typename cutlass::gemm::GemmShape<16, 8, 32>; if (out.dtype() == torch::kBFloat16) { - return cutlass_scaled_mm_dq_dispatcher>(out, a, b, a_scales, - b_scales); + ScaledEpilogue, TileShape, WarpShape, InstructionShape, 5>>( + out, a, b, a_scales, b_scales); } else { TORCH_CHECK(out.dtype() == torch::kFloat16); - return cutlass_scaled_mm_dq_dispatcher>(out, a, b, a_scales, - b_scales); + ScaledEpilogue, TileShape, WarpShape, InstructionShape, 5>>( + out, a, b, a_scales, b_scales); } } -void cutlass_scaled_mm_dq_sm89(torch::Tensor& out, torch::Tensor const& a, - torch::Tensor const& b, - torch::Tensor const& a_scales, - torch::Tensor const& b_scales) { +void cutlass_scaled_mm_sm89(torch::Tensor& out, 
torch::Tensor const& a, + torch::Tensor const& b, + torch::Tensor const& a_scales, + torch::Tensor const& b_scales) { using TileShape = typename cutlass::gemm::GemmShape<128, 128, 64>; using WarpShape = typename cutlass::gemm::GemmShape<64, 64, 64>; using InstructionShape = typename cutlass::gemm::GemmShape<16, 8, 32>; @@ -298,32 +321,32 @@ void cutlass_scaled_mm_dq_sm89(torch::Tensor& out, torch::Tensor const& a, TORCH_CHECK(b.dtype() == torch::kInt8); if (out.dtype() == torch::kBFloat16) { - return cutlass_scaled_mm_dq_dispatcher>(out, a, b, a_scales, - b_scales); + ScaledEpilogue, TileShape, WarpShape, InstructionShape, 5>>( + out, a, b, a_scales, b_scales); } else { assert(out.dtype() == torch::kFloat16); - return cutlass_scaled_mm_dq_dispatcher>(out, a, b, a_scales, - b_scales); + ScaledEpilogue, TileShape, WarpShape, InstructionShape, 5>>( + out, a, b, a_scales, b_scales); } } else { TORCH_CHECK(a.dtype() == torch::kFloat8_e4m3fn); TORCH_CHECK(b.dtype() == torch::kFloat8_e4m3fn); if (out.dtype() == torch::kBFloat16) { - return cutlass_scaled_mm_dq_dispatcher>( - out, a, b, a_scales, b_scales); + cutlass::bfloat16_t, ScaledEpilogue, TileShape, WarpShape, + InstructionShape, 5>>(out, a, b, a_scales, b_scales); } else { TORCH_CHECK(out.dtype() == torch::kFloat16); - return cutlass_scaled_mm_dq_dispatcher>( - out, a, b, a_scales, b_scales); + cutlass::half_t, ScaledEpilogue, TileShape, WarpShape, + InstructionShape, 5>>(out, a, b, a_scales, b_scales); } } } diff --git a/csrc/quantization/cutlass_w8a8/scaled_mm_dq_c3x.cu b/csrc/quantization/cutlass_w8a8/scaled_mm_c3x.cu similarity index 66% rename from csrc/quantization/cutlass_w8a8/scaled_mm_dq_c3x.cu rename to csrc/quantization/cutlass_w8a8/scaled_mm_c3x.cu index a99802153643a..f1a2b73ff962b 100644 --- a/csrc/quantization/cutlass_w8a8/scaled_mm_dq_c3x.cu +++ b/csrc/quantization/cutlass_w8a8/scaled_mm_c3x.cu @@ -32,21 +32,14 @@ using namespace cute; /* - This defines a quantized GEMM operation with dequantized output, similar to - torch._scaled_mm. It is defined using the CUTLASS 3.x API, and is used for + This file defines quantized GEMM operations using the CUTLASS 3.x API, for NVIDIA GPUs with sm90a (Hopper) or later. - A and B may be both either int8 or fp8_e4m3. A can be quantized per-tensor or - per-row. B can be quantized per-tensor or per-column. - Any combination of per-tensor and per-row or column is supported. - A and B must have symmetric quantization (zero point == 0). - - So the GEMM operation is D = (a_scales * A) (b_scales * B), where the - scales are applied elementwise with numpy-style broadcasting. - - ScaleA and ScaleB define the epilogue functions that apply the scales for - the A and B operands respectively. These scales may be either per-tensor or - per row or column. + Epilogue functions can be defined to post-process the output before it is + written to GPU memory. + Epilogues must contain a public type named EVTCompute of type Sm90EVT, + as well as a static prepare_args function that constructs an + EVTCompute::Arguments struct. */ namespace { @@ -71,21 +64,25 @@ struct enable_sm90_or_later : Kernel { } }; -template -struct cutlass_3x_gemm { - using ElementAB = ElementAB_; - using ElementD = ElementD_; - using ElementAcc = - typename std::conditional, int32_t, - float>::type; +/* + This epilogue function defines a quantized GEMM operation similar to + torch.scaled_mm_. 
- using EpilogueDescriptor = - cutlass::epilogue::collective::detail::EpilogueDescriptor< - TileShape, cutlass::epilogue::collective::EpilogueTileAuto, ElementD, - ElementD, EpilogueSchedule>; + A and B may be both either int8 or fp8_e4m3. A can be + quantized per-tensor or per-row. B can be quantized per-tensor or per-column. + Any combination of per-tensor and per-row or column is supported. + A and B must have symmetric quantization (zero point == 0). + So the GEMM operation is D = (a_scales * A) (b_scales * B), where the + scales are applied elementwise with numpy-style broadcasting. + + ScaleA and ScaleB define the epilogue functions that apply the scales for + the A and B operands respectively. These scales may be either per-tensor or + per row or column. +*/ +template +struct ScaledEpilogue { + private: using Accum = cutlass::epilogue::fusion::Sm90AccFetch; using ScaleA = cutlass::epilogue::fusion::Sm90ColOrScalarBroadcast< @@ -111,19 +108,53 @@ struct cutlass_3x_gemm { cutlass::multiplies, ElementD, float, cutlass::FloatRoundStyle::round_to_nearest>; - using EVTCompute1 = + public: + using EVTCompute = cutlass::epilogue::fusion::Sm90EVT; + using ArgumentType = typename EVTCompute::Arguments; + + static ArgumentType prepare_args(torch::Tensor const& a_scales, + torch::Tensor const& b_scales) { + using ScaleA_Args = typename ScaleA::Arguments; + using ScaleB_Args = typename ScaleB::Arguments; + + ScaleA_Args a_args{a_scales.data_ptr(), a_scales.numel() != 1, {}}; + ScaleB_Args b_args{b_scales.data_ptr(), b_scales.numel() != 1, {}}; + + return ArgumentType{a_args, {b_args}}; + } +}; + +template typename Epilogue_, + typename TileShape, typename ClusterShape, typename KernelSchedule, + typename EpilogueSchedule> +struct cutlass_3x_gemm { + using ElementAB = ElementAB_; + using ElementD = ElementD_; + using ElementAcc = + typename std::conditional, int32_t, + float>::type; + + using EpilogueDescriptor = + cutlass::epilogue::collective::detail::EpilogueDescriptor< + TileShape, cutlass::epilogue::collective::EpilogueTileAuto, ElementD, + ElementD, EpilogueSchedule>; + + using Epilogue = Epilogue_; using StrideD = Stride, Int<0>>; using ElementC = void; using StrideC = StrideD; + using EVTCompute = typename Epilogue::EVTCompute; + using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape, ClusterShape, cutlass::epilogue::collective::EpilogueTileAuto, ElementAcc, float, ElementC, StrideC, 4, ElementD, StrideD, 4, - EpilogueSchedule, EVTCompute1>::CollectiveOp; + EpilogueSchedule, EVTCompute>::CollectiveOp; static constexpr size_t CEStorageSize = sizeof(typename CollectiveEpilogue::SharedStorage); @@ -148,11 +179,10 @@ struct cutlass_3x_gemm { struct GemmKernel : public KernelType {}; }; -template -void cutlass_scaled_mm_dq_dispatcher(torch::Tensor& out, torch::Tensor const& a, - torch::Tensor const& b, - torch::Tensor const& a_scales, - torch::Tensor const& b_scales) { +template +void cutlass_gemm_caller(torch::Tensor& out, torch::Tensor const& a, + torch::Tensor const& b, + EpilogueArgs&&... 
epilogue_params) { using ElementAB = typename Gemm::ElementAB; using ElementD = typename Gemm::ElementD; @@ -182,19 +212,13 @@ void cutlass_scaled_mm_dq_dispatcher(torch::Tensor& out, torch::Tensor const& a, auto c_ptr = static_cast(out.data_ptr()); typename GemmKernel::EpilogueArguments epilogue_args{ - {}, c_ptr, c_stride, c_ptr, c_stride}; + Gemm::Epilogue::prepare_args( + std::forward(epilogue_params)...), + c_ptr, c_stride, c_ptr, c_stride}; typename GemmKernel::Arguments args{cutlass::gemm::GemmUniversalMode::kGemm, prob_shape, mainloop_args, epilogue_args}; - using ScaleA_Args = typename Gemm::ScaleA::Arguments; - using ScaleB_Args = typename Gemm::ScaleB::Arguments; - - ScaleA_Args a_args{a_scales.data_ptr(), a_scales.numel() != 1, {}}; - ScaleB_Args b_args{b_scales.data_ptr(), b_scales.numel() != 1, {}}; - - args.epilogue.thread = {a_args, {b_args}}; - // Launch the CUTLASS GEMM kernel. using GemmOp = cutlass::gemm::device::GemmUniversalAdapter; GemmOp gemm_op; @@ -209,7 +233,8 @@ void cutlass_scaled_mm_dq_dispatcher(torch::Tensor& out, torch::Tensor const& a, CUTLASS_CHECK(status); } -template +template typename Epilogue, int32_t M> struct sm90_fp8_config { static_assert(std::is_same()); using KernelSchedule = @@ -219,12 +244,13 @@ struct sm90_fp8_config { using ClusterShape = Shape<_2, _1, _1>; using Cutlass3xGemm = - cutlass_3x_gemm; + cutlass_3x_gemm; }; -template -struct sm90_fp8_config { +template typename Epilogue> +struct sm90_fp8_config { static_assert(std::is_same()); using KernelSchedule = cutlass::gemm::KernelTmaWarpSpecializedPingpongFP8FastAccum; @@ -233,12 +259,13 @@ struct sm90_fp8_config { using ClusterShape = Shape<_2, _1, _1>; using Cutlass3xGemm = - cutlass_3x_gemm; + cutlass_3x_gemm; }; -template -struct sm90_fp8_config { +template typename Epilogue> +struct sm90_fp8_config { static_assert(std::is_same()); using KernelSchedule = cutlass::gemm::KernelTmaWarpSpecializedPingpongFP8FastAccum; @@ -247,30 +274,28 @@ struct sm90_fp8_config { using ClusterShape = Shape<_1, _8, _1>; using Cutlass3xGemm = - cutlass_3x_gemm; + cutlass_3x_gemm; }; } // namespace -template -void cutlass_scaled_mm_dq_sm90_fp8_dispatch(torch::Tensor& out, - torch::Tensor const& a, - torch::Tensor const& b, - torch::Tensor const& a_scales, - torch::Tensor const& b_scales) { +template typename Epilogue, + typename... EpilogueArgs> +void cutlass_gemm_sm90_fp8_dispatch(torch::Tensor& out, torch::Tensor const& a, + torch::Tensor const& b, + EpilogueArgs&&... 
args) { static_assert(std::is_same()); TORCH_CHECK(a.dtype() == torch::kFloat8_e4m3fn); TORCH_CHECK(b.dtype() == torch::kFloat8_e4m3fn); - TORCH_CHECK(a_scales.dtype() == torch::kFloat32); - TORCH_CHECK(b_scales.dtype() == torch::kFloat32); using Cutlass3xGemmDefault = - typename sm90_fp8_config::Cutlass3xGemm; + typename sm90_fp8_config::Cutlass3xGemm; using Cutlass3xGemmM64 = - typename sm90_fp8_config::Cutlass3xGemm; + typename sm90_fp8_config::Cutlass3xGemm; using Cutlass3xGemmM128 = - typename sm90_fp8_config::Cutlass3xGemm; + typename sm90_fp8_config::Cutlass3xGemm; uint32_t const m = a.size(0); uint32_t const mp2 = @@ -278,23 +303,23 @@ void cutlass_scaled_mm_dq_sm90_fp8_dispatch(torch::Tensor& out, if (mp2 <= 64) { // m in [1, 64] - return cutlass_scaled_mm_dq_dispatcher( - out, a, b, a_scales, b_scales); + return cutlass_gemm_caller( + out, a, b, std::forward(args)...); } else if (mp2 <= 128) { // m in (64, 128] - return cutlass_scaled_mm_dq_dispatcher( - out, a, b, a_scales, b_scales); + return cutlass_gemm_caller( + out, a, b, std::forward(args)...); } else { // m in (128, inf) - return cutlass_scaled_mm_dq_dispatcher( - out, a, b, a_scales, b_scales); + return cutlass_gemm_caller( + out, a, b, std::forward(args)...); } } -void cutlass_scaled_mm_dq_sm90(torch::Tensor& out, torch::Tensor const& a, - torch::Tensor const& b, - torch::Tensor const& a_scales, - torch::Tensor const& b_scales) { +void cutlass_scaled_mm_sm90(torch::Tensor& out, torch::Tensor const& a, + torch::Tensor const& b, + torch::Tensor const& a_scales, + torch::Tensor const& b_scales) { TORCH_CHECK(a_scales.dtype() == torch::kFloat32); TORCH_CHECK(b_scales.dtype() == torch::kFloat32); @@ -308,16 +333,15 @@ void cutlass_scaled_mm_dq_sm90(torch::Tensor& out, torch::Tensor const& a, using EpilogueSchedule = typename cutlass::epilogue::TmaWarpSpecialized; if (out.dtype() == torch::kBFloat16) { - return cutlass_scaled_mm_dq_dispatcher< - cutlass_3x_gemm>( - out, a, b, a_scales, b_scales); + return cutlass_gemm_caller>(out, a, b, a_scales, b_scales); } else { TORCH_CHECK(out.dtype() == torch::kFloat16); - return cutlass_scaled_mm_dq_dispatcher< - cutlass_3x_gemm>( + return cutlass_gemm_caller< + cutlass_3x_gemm>( out, a, b, a_scales, b_scales); } } else { @@ -325,13 +349,13 @@ void cutlass_scaled_mm_dq_sm90(torch::Tensor& out, torch::Tensor const& a, TORCH_CHECK(b.dtype() == torch::kFloat8_e4m3fn); if (out.dtype() == torch::kBFloat16) { - return cutlass_scaled_mm_dq_sm90_fp8_dispatch( + return cutlass_gemm_sm90_fp8_dispatch< + cutlass::float_e4m3_t, cutlass::bfloat16_t, ScaledEpilogue>( out, a, b, a_scales, b_scales); } else { TORCH_CHECK(out.dtype() == torch::kFloat16); - return cutlass_scaled_mm_dq_sm90_fp8_dispatch( + return cutlass_gemm_sm90_fp8_dispatch( out, a, b, a_scales, b_scales); } } diff --git a/csrc/quantization/cutlass_w8a8/scaled_mm_dq_entry.cu b/csrc/quantization/cutlass_w8a8/scaled_mm_entry.cu similarity index 50% rename from csrc/quantization/cutlass_w8a8/scaled_mm_dq_entry.cu rename to csrc/quantization/cutlass_w8a8/scaled_mm_entry.cu index 423e64a4932e2..687f8efd8dc00 100644 --- a/csrc/quantization/cutlass_w8a8/scaled_mm_dq_entry.cu +++ b/csrc/quantization/cutlass_w8a8/scaled_mm_entry.cu @@ -3,31 +3,31 @@ #include #include -void cutlass_scaled_mm_dq_sm75(torch::Tensor& c, torch::Tensor const& a, - torch::Tensor const& b, - torch::Tensor const& a_scales, - torch::Tensor const& b_scales); +void cutlass_scaled_mm_sm75(torch::Tensor& c, torch::Tensor const& a, + torch::Tensor const& b, + torch::Tensor 
const& a_scales, + torch::Tensor const& b_scales); -void cutlass_scaled_mm_dq_sm80(torch::Tensor& c, torch::Tensor const& a, - torch::Tensor const& b, - torch::Tensor const& a_scales, - torch::Tensor const& b_scales); +void cutlass_scaled_mm_sm80(torch::Tensor& c, torch::Tensor const& a, + torch::Tensor const& b, + torch::Tensor const& a_scales, + torch::Tensor const& b_scales); -void cutlass_scaled_mm_dq_sm89(torch::Tensor& c, torch::Tensor const& a, - torch::Tensor const& b, - torch::Tensor const& a_scales, - torch::Tensor const& b_scales); +void cutlass_scaled_mm_sm89(torch::Tensor& c, torch::Tensor const& a, + torch::Tensor const& b, + torch::Tensor const& a_scales, + torch::Tensor const& b_scales); #if defined CUDA_VERSION && CUDA_VERSION >= 12000 -void cutlass_scaled_mm_dq_sm90(torch::Tensor& c, torch::Tensor const& a, - torch::Tensor const& b, - torch::Tensor const& a_scales, - torch::Tensor const& b_scales); +void cutlass_scaled_mm_sm90(torch::Tensor& c, torch::Tensor const& a, + torch::Tensor const& b, + torch::Tensor const& a_scales, + torch::Tensor const& b_scales); #endif -void cutlass_scaled_mm_dq(torch::Tensor& c, torch::Tensor const& a, - torch::Tensor const& b, torch::Tensor const& a_scales, - torch::Tensor const& b_scales) { +void cutlass_scaled_mm(torch::Tensor& c, torch::Tensor const& a, + torch::Tensor const& b, torch::Tensor const& a_scales, + torch::Tensor const& b_scales) { int32_t major_capability; int32_t minor_capability; cudaDeviceGetAttribute(&major_capability, cudaDevAttrComputeCapabilityMajor, @@ -57,19 +57,19 @@ void cutlass_scaled_mm_dq(torch::Tensor& c, torch::Tensor const& a, // Guard against compilation issues for sm90 kernels #if defined CUDA_VERSION && CUDA_VERSION >= 12000 - cutlass_scaled_mm_dq_sm90(c, a, b, a_scales, b_scales); + cutlass_scaled_mm_sm90(c, a, b, a_scales, b_scales); #else - cutlass_scaled_mm_dq_sm80(c, a, b, a_scales, b_scales); + cutlass_scaled_mm_sm80(c, a, b, a_scales, b_scales); #endif } else if (version_num == 89) { // Ada Lovelace - cutlass_scaled_mm_dq_sm89(c, a, b, a_scales, b_scales); + cutlass_scaled_mm_sm89(c, a, b, a_scales, b_scales); } else if (version_num >= 80) { // Ampere - cutlass_scaled_mm_dq_sm80(c, a, b, a_scales, b_scales); + cutlass_scaled_mm_sm80(c, a, b, a_scales, b_scales); } else { // Turing TORCH_CHECK(version_num >= 75); - cutlass_scaled_mm_dq_sm75(c, a, b, a_scales, b_scales); + cutlass_scaled_mm_sm75(c, a, b, a_scales, b_scales); } } diff --git a/csrc/quantization/fp8/common.cu b/csrc/quantization/fp8/common.cu index 8c5b693bf6ed7..6120086d72df2 100644 --- a/csrc/quantization/fp8/common.cu +++ b/csrc/quantization/fp8/common.cu @@ -23,8 +23,8 @@ __device__ __forceinline__ float atomicMaxFloat(float* addr, float value) { template __device__ __forceinline__ c10::Float8_e4m3fn scaled_fp8_conversion( - const scalar_t val, const float scale) { - float x = static_cast(val) / scale; + const scalar_t val, const float inverted_scale) { + float x = static_cast(val) * inverted_scale; float r = fmax(-FP8_E4M3_MAX, fmin(x, FP8_E4M3_MAX)); return static_cast(r); } @@ -71,15 +71,56 @@ __global__ void segmented_max_reduction(float* __restrict__ scale, } } +template +struct __align__(8) vec4_t { + scalar_t x; + scalar_t y; + scalar_t z; + scalar_t w; +}; + +typedef struct __align__(4) { + c10::Float8_e4m3fn x; + c10::Float8_e4m3fn y; + c10::Float8_e4m3fn z; + c10::Float8_e4m3fn w; +} +float8x4_t; + template __global__ void scaled_fp8_quant_kernel(c10::Float8_e4m3fn* __restrict__ out, const scalar_t* __restrict__ input, 
const float* __restrict__ scale, int64_t num_elems) { - int i = blockDim.x * blockIdx.x + threadIdx.x; - while (i < num_elems) { - out[i] = scaled_fp8_conversion(input[i], *scale); - i += blockDim.x * gridDim.x; + int tid = blockDim.x * blockIdx.x + threadIdx.x; + + // Invert the scale so that we can use multiplications to avoid expensive + // division. + const float inverted_scale = 1.0f / (*scale); + + // Vectorized input/output to better utilize memory bandwidth. + const vec4_t* vectorized_in = + reinterpret_cast*>(input); + float8x4_t* vectorized_out = reinterpret_cast(out); + + int num_vec_elems = num_elems >> 2; + +#pragma unroll 4 + for (int i = tid; i < num_vec_elems; i += blockDim.x * gridDim.x) { + vec4_t in_vec = vectorized_in[i]; + float8x4_t out_vec; + + out_vec.x = scaled_fp8_conversion(in_vec.x, inverted_scale); + out_vec.y = scaled_fp8_conversion(in_vec.y, inverted_scale); + out_vec.z = scaled_fp8_conversion(in_vec.z, inverted_scale); + out_vec.w = scaled_fp8_conversion(in_vec.w, inverted_scale); + vectorized_out[i] = out_vec; + } + + // Handle the remaining elements if num_elems is not divisible by 4 + for (int i = num_vec_elems * 4 + tid; i < num_elems; + i += blockDim.x * gridDim.x) { + out[i] = scaled_fp8_conversion(input[i], inverted_scale); } } diff --git a/csrc/torch_bindings.cpp b/csrc/torch_bindings.cpp index c240360d6d203..49c55b6499a9b 100644 --- a/csrc/torch_bindings.cpp +++ b/csrc/torch_bindings.cpp @@ -143,10 +143,10 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) { // CUTLASS w8a8 GEMM, supporting symmetric per-tensor or per-row/column // quantization. ops.def( - "cutlass_scaled_mm_dq(Tensor! out, Tensor a," - " Tensor b, Tensor a_scales," - " Tensor b_scales) -> ()"); - ops.impl("cutlass_scaled_mm_dq", torch::kCUDA, &cutlass_scaled_mm_dq); + "cutlass_scaled_mm(Tensor! out, Tensor a," + " Tensor b, Tensor a_scales," + " Tensor b_scales) -> ()"); + ops.impl("cutlass_scaled_mm", torch::kCUDA, &cutlass_scaled_mm); #endif // Quantized GEMM for GPTQ. diff --git a/docs/source/community/meetups.rst b/docs/source/community/meetups.rst index f371194781de3..0fde31ef9b059 100644 --- a/docs/source/community/meetups.rst +++ b/docs/source/community/meetups.rst @@ -5,6 +5,7 @@ vLLM Meetups We host regular meetups in San Francisco Bay Area every 2 months. We will share the project updates from the vLLM team and have guest speakers from the industry to share their experience and insights. Please find the materials of our previous meetups below: +- `The fourth vLLM meetup `__, with Cloudflare and BentoML, June 11th 2024. `[Slides] `__ - `The third vLLM meetup `__, with Roblox, April 2nd 2024. `[Slides] `__ - `The second vLLM meetup `__, with IBM Research, January 31st 2024. `[Slides] `__ `[Video (vLLM Update)] `__ `[Video (IBM Research & torch.compile)] `__ - `The first vLLM meetup `__, with a16z, October 5th 2023. `[Slides] `__ diff --git a/docs/source/getting_started/cpu-installation.rst b/docs/source/getting_started/cpu-installation.rst index 5270253cae9ab..a9544e8a59a3d 100644 --- a/docs/source/getting_started/cpu-installation.rst +++ b/docs/source/getting_started/cpu-installation.rst @@ -10,6 +10,7 @@ Table of contents: #. :ref:`Requirements ` #. :ref:`Quick start using Dockerfile ` #. :ref:`Build from source ` +#. :ref:`Intel Extension for PyTorch ` #. :ref:`Performance tips ` .. 
_cpu_backend_requirements: @@ -18,7 +19,7 @@ Requirements ------------ * OS: Linux -* Compiler: gcc/g++>=12.3.0 (recommended) +* Compiler: gcc/g++>=12.3.0 (optional, recommended) * Instruction set architecture (ISA) requirement: AVX512 is required. .. _cpu_backend_quick_start_dockerfile: @@ -41,7 +42,7 @@ Quick start using Dockerfile Build from source ----------------- -- First, install required compiler. We recommend to use ``gcc/g++ >= 12.3.0`` as the default compiler to avoid potential problems. For example, on Ubuntu 22.4, you can run: +- First, install recommended compiler. We recommend to use ``gcc/g++ >= 12.3.0`` as the default compiler to avoid potential problems. For example, on Ubuntu 22.4, you can run: .. code-block:: console @@ -70,6 +71,15 @@ Build from source - If you want to force enable AVX512_BF16 for the cross-compilation, please set environment variable VLLM_CPU_AVX512BF16=1 before the building. +.. _ipex_guidance: + +Intel Extension for PyTorch +--------------------------- + +- `Intel Extension for PyTorch (IPEX) `_ extends PyTorch with up-to-date features optimizations for an extra performance boost on Intel hardware. + +- IPEX after the ``2.3.0`` can be enabled in the CPU backend by default if it is installed. + .. _cpu_backend_performance_tips: Performance tips @@ -77,6 +87,15 @@ Performance tips - vLLM CPU backend uses environment variable ``VLLM_CPU_KVCACHE_SPACE`` to specify the KV Cache size (e.g, ``VLLM_CPU_KVCACHE_SPACE=40`` means 40 GB space for KV cache), larger setting will allow vLLM running more requests in parallel. This parameter should be set based on the hardware configuration and memory management pattern of users. +- We highly recommend to use TCMalloc for high performance memory allocation and better cache locality. For example, on Ubuntu 22.4, you can run: + +.. code-block:: console + + $ sudo apt-get install libtcmalloc-minimal4 # install TCMalloc library + $ find / -name *libtcmalloc* # find the dynamic link library path + $ export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libtcmalloc_minimal.so.4:$LD_PRELOAD # prepend the library to LD_PRELOAD + $ python examples/offline_inference.py # run vLLM + - vLLM CPU backend uses OpenMP for thread-parallel computation. If you want the best performance on CPU, it will be very critical to isolate CPU cores for OpenMP threads with other thread pools (like web-service event-loop), to avoid CPU oversubscription. - If using vLLM CPU backend on a bare-metal machine, it is recommended to disable the hyper-threading. diff --git a/docs/source/getting_started/debugging.rst b/docs/source/getting_started/debugging.rst index 3e4d0362e3a07..ff37f4e628692 100644 --- a/docs/source/getting_started/debugging.rst +++ b/docs/source/getting_started/debugging.rst @@ -8,27 +8,30 @@ Debugging hang/crash issues When an vLLM instance hangs or crashes, it is very difficult to debug the issue. But wait a minute, it is also possible that vLLM is doing something that indeed takes a long time: -- Downloading a model: do you have the model already downloaded in your disk? If not, vLLM will download the model from the internet, which can take a long time. Be sure to check the internet connection. It would be better to download the model first using `huggingface cli `_ and then use the local path to the model. This way, you can isolate the issue. -- Loading the model from disk: if the model is large, it can take a long time to load the model from disk. Please take care of the location you store the model. 
Some clusters have shared filesystems across nodes, e.g. distributed filesystem or network filesystem, which can be slow. It would be better to store the model in a local disk. In addition, please also watch the CPU memory usage. When the model is too large, it might take much CPU memory, which can slow down the operating system because it needs to frequently swap memory between the disk and the memory. -- Tensor parallel inference: if the model is too large to fit in a single GPU, you might want to use tensor parallelism to split the model across multiple GPUs. In that case, every process will read the whole model and split it into chunks, which makes the disk reading time even longer (proportional to the size of tensor parallelism). You can convert the model checkpoint to a sharded checkpoint using `the provided script `_ . The conversion process might take some time, but later you can load the sharded checkpoint much faster. The model loading time should remain constant regardless of the size of tensor parallelism. +- **Downloading a model**: Do you have the model already downloaded in your disk? If not, vLLM will download the model from the internet, which can take a long time. Be sure to check the internet connection. It would be better to download the model first using `huggingface-cli `_ and then use the local path to the model. This way, you can isolate the issue. +- **Loading the model from disk**: If the model is large, it can take a long time to load the model from disk. Please take care of the location you store the model. Some clusters have shared filesystems across nodes, e.g. distributed filesystem or network filesystem, which can be slow. It would be better to store the model in a local disk. In addition, please also watch the CPU memory usage. When the model is too large, it might take much CPU memory, which can slow down the operating system because it needs to frequently swap memory between the disk and the memory. +- **Tensor parallel inference**: If the model is too large to fit in a single GPU, you might want to use tensor parallelism to split the model across multiple GPUs. In that case, every process will read the whole model and split it into chunks, which makes the disk reading time even longer (proportional to the size of tensor parallelism). You can convert the model checkpoint to a sharded checkpoint using `the provided script `_ . The conversion process might take some time, but later you can load the sharded checkpoint much faster. The model loading time should remain constant regardless of the size of tensor parallelism. -If you already take care of the above issues, and the vLLM instance still hangs, with CPU and GPU utilization at near zero, it is likely that the vLLM instance is stuck somewhere. Here are some tips to help debug the issue: +If you have already taken care of the above issues, but the vLLM instance still hangs, with CPU and GPU utilization at near zero, it is likely that the vLLM instance is stuck somewhere. Here are some tips to help debug the issue: - Set the environment variable ``export VLLM_LOGGING_LEVEL=DEBUG`` to turn on more logging. - Set the environment variable ``export CUDA_LAUNCH_BLOCKING=1`` to know exactly which CUDA kernel is causing the trouble. - Set the environment variable ``export NCCL_DEBUG=TRACE`` to turn on more logging for NCCL. -- Set the environment variable ``export VLLM_TRACE_FUNCTION=1`` . All the function calls in vLLM will be recorded. Inspect these log files, and tell which function crashes or hangs. 
**Note: it will generate a lot of logs and slow down the system. Only use it for debugging purposes.** +- Set the environment variable ``export VLLM_TRACE_FUNCTION=1``. All the function calls in vLLM will be recorded. Inspect these log files, and tell which function crashes or hangs. + + .. warning:: + vLLM function tracing will generate a lot of logs and slow down the system. Only use it for debugging purposes. With more logging, hopefully you can find the root cause of the issue. Here are some common issues that can cause hangs: -- The network setup is incorrect. The vLLM instance cannot get the correct IP address. You can find the log such as ``DEBUG 06-10 21:32:17 parallel_state.py:88] world_size=8 rank=0 local_rank=0 distributed_init_method=tcp://xxx.xxx.xxx.xxx:54641 backend=nccl``. The IP address should be the correct one. If not, override the IP address by setting the environment variable ``export VLLM_HOST_IP=your_ip_address``. -- Hardware/driver setup is incorrect. GPU communication cannot be established. You can run a sanity check script below to see if the GPU communication is working correctly. +- **Incorrect network setup**: The vLLM instance cannot get the correct IP address. You can find the log such as ``DEBUG 06-10 21:32:17 parallel_state.py:88] world_size=8 rank=0 local_rank=0 distributed_init_method=tcp://xxx.xxx.xxx.xxx:54641 backend=nccl``. The IP address should be the correct one. If not, override the IP address by setting the environment variable ``export VLLM_HOST_IP=your_ip_address``. +- **Incorrect hardware/driver**: GPU communication cannot be established. You can run the following sanity check script to see if the GPU communication is working correctly. .. code-block:: python - # save it as `test.py`` , and run it with `NCCL_DEBUG=TRACE torchrun --nproc-per-node=8 test.py` + # save it as `test.py` , and run it with `NCCL_DEBUG=TRACE torchrun --nproc-per-node=8 test.py` # adjust `--nproc-per-node` to the number of GPUs you want to use. import torch import torch.distributed as dist @@ -39,4 +42,4 @@ Here are some common issues that can cause hangs: value = data.mean().item() assert value == dist.get_world_size() -If the problem persists, feel free to open an `issue `_ on GitHub, with a detailed description of the issue, your environment, and the logs. +If the problem persists, feel free to `open an issue on GitHub `_, with a detailed description of the issue, your environment, and the logs. diff --git a/docs/source/getting_started/tpu-installation.rst b/docs/source/getting_started/tpu-installation.rst new file mode 100644 index 0000000000000..3627600e1f23a --- /dev/null +++ b/docs/source/getting_started/tpu-installation.rst @@ -0,0 +1,75 @@ +.. _installation_tpu: + +Installation with TPU +===================== + +vLLM supports Google Cloud TPUs using PyTorch XLA. + +Requirements +------------ + +* Google Cloud TPU VM (single host) +* TPU versions: v5e, v5p, v4 +* Python: 3.10 + +Installation options: + +1. :ref:`Build a docker image with Dockerfile `. +2. :ref:`Build from source `. + +.. _build_docker_tpu: + +Build a docker image with :code:`Dockerfile.tpu` +------------------------------------------------ + +`Dockerfile.tpu `_ is provided to build a docker image with TPU support. + +.. code-block:: console + + $ docker build -f Dockerfile.tpu -t vllm-tpu . + + +You can run the docker image with the following command: + +.. code-block:: console + + $ # Make sure to add `--privileged --net host --shm-size=16G`. 
+ $ docker run --privileged --net host --shm-size=16G -it vllm-tpu + + +.. _build_from_source_tpu: + +Build from source +----------------- + +You can also build and install the TPU backend from source. + +First, install the dependencies: + +.. code-block:: console + + $ # (Recommended) Create a new conda environment. + $ conda create -n myenv python=3.10 -y + $ conda activate myenv + + $ # Clean up the existing torch and torch-xla packages. + $ pip uninstall torch torch-xla -y + + $ # Install PyTorch and PyTorch XLA. + $ export DATE="+20240601" + $ pip install https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch-nightly${DATE}-cp310-cp310-linux_x86_64.whl + $ pip install https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch_xla-nightly${DATE}-cp310-cp310-linux_x86_64.whl + + $ # Install JAX and Pallas. + $ pip install torch_xla[tpu] -f https://storage.googleapis.com/libtpu-releases/index.html + $ pip install torch_xla[pallas] -f https://storage.googleapis.com/jax-releases/jax_nightly_releases.html -f https://storage.googleapis.com/jax-releases/jaxlib_nightly_releases.html + + $ # Install other build dependencies. + $ pip install packaging aiohttp + + +Next, build vLLM from source. This will only take a few seconds: + +.. code-block:: console + + $ VLLM_TARGET_DEVICE="tpu" python setup.py develop diff --git a/docs/source/index.rst b/docs/source/index.rst index 807251d029746..b7c0d5b880079 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -63,8 +63,9 @@ Documentation getting_started/installation getting_started/amd-installation - getting_started/neuron-installation getting_started/cpu-installation + getting_started/neuron-installation + getting_started/tpu-installation getting_started/quickstart getting_started/debugging getting_started/examples/examples_index diff --git a/docs/source/models/vlm.rst b/docs/source/models/vlm.rst index 5ab4157cb3587..70ac82e2005b9 100644 --- a/docs/source/models/vlm.rst +++ b/docs/source/models/vlm.rst @@ -20,9 +20,9 @@ The following :ref:`engine arguments ` are specific to VLMs: Currently, the support for vision language models on vLLM has the following limitations: * Only single image input is supported per text prompt. - * Dynamic ``image_input_shape`` is not supported: the input image will be resized to the static ``image_input_shape``. This means model output might not exactly match the HuggingFace implementation. + * Dynamic ``image_input_shape`` is not supported: the input image will be resized to the static ``image_input_shape``. This means our LLaVA-NeXT output may not exactly match the huggingface implementation. - We are continuously improving user & developer experience for VLMs. Please raise an issue on GitHub if you have any feedback or feature requests. + We are continuously improving user & developer experience for VLMs. Please `open an issue on GitHub `_ if you have any feedback or feature requests. 
Offline Batched Inference ------------------------- diff --git a/examples/fp8/quantizer/quantize.py b/examples/fp8/quantizer/quantize.py index cee13b4c9c863..15f1a06b1219b 100644 --- a/examples/fp8/quantizer/quantize.py +++ b/examples/fp8/quantizer/quantize.py @@ -332,7 +332,7 @@ def main(args): if __name__ == "__main__": parser = argparse.ArgumentParser(description=__doc__) - parser.add_argument("--model_dir", + parser.add_argument("--model-dir", help="Specify where the HuggingFace model is", required=True) parser.add_argument("--device", default="cuda") @@ -346,19 +346,19 @@ def main(args): "full_prec" ], ) - parser.add_argument("--batch_size", + parser.add_argument("--batch-size", help="Batch size for calibration.", type=int, default=1) - parser.add_argument("--calib_size", + parser.add_argument("--calib-size", help="Number of samples for calibration.", type=int, default=512) - parser.add_argument("--output_dir", default="exported_model") - parser.add_argument("--tp_size", type=int, default=1) - parser.add_argument("--pp_size", type=int, default=1) - parser.add_argument("--awq_block_size", type=int, default=128) - parser.add_argument("--kv_cache_dtype", + parser.add_argument("--output-dir", default="exported_model") + parser.add_argument("--tp-size", type=int, default=1) + parser.add_argument("--pp-size", type=int, default=1) + parser.add_argument("--awq-block-size", type=int, default=128) + parser.add_argument("--kv-cache-dtype", help="KV Cache dtype.", default=None, choices=["int8", "fp8", None]) diff --git a/examples/tensorize_vllm_model.py b/examples/tensorize_vllm_model.py index 8b74ae1d75a1d..f9ed5fe08988e 100644 --- a/examples/tensorize_vllm_model.py +++ b/examples/tensorize_vllm_model.py @@ -3,18 +3,12 @@ import json import os import uuid -from functools import partial - -from tensorizer import stream_io from vllm import LLM -from vllm.distributed import (init_distributed_environment, - initialize_model_parallel) from vllm.engine.arg_utils import EngineArgs -from vllm.engine.llm_engine import LLMEngine from vllm.model_executor.model_loader.tensorizer import (TensorizerArgs, TensorizerConfig, - serialize_vllm_model) + tensorize_vllm_model) # yapf conflicts with isort for this docstring # yapf: disable @@ -61,6 +55,12 @@ You can also provide a `--keyfile` argument to decrypt the model weights if they were serialized with encryption. +To support distributed tensor-parallel models, each model shard will be +serialized to a separate file. The tensorizer_uri is then specified as a string +template with a format specifier such as '%03d' that will be rendered with the +shard's rank. Sharded models serialized with this script will be named as +model-rank-%03d.tensors + For more information on the available arguments for serializing, run `python -m examples.tensorize_vllm_model serialize --help`. 
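
To make the sharded naming scheme described in the docstring above concrete, here is a small illustration of how a ``%03d`` format specifier in ``tensorizer_uri`` expands once per tensor-parallel rank. The bucket, model, and suffix in the base path are hypothetical placeholders; only the ``model-rank-%03d.tensors`` pattern comes from this script.

.. code-block:: python

    # Illustration only: expanding a '%03d' tensorizer_uri template per shard rank.
    # The base path below is a made-up placeholder; the file-name pattern mirrors
    # the `model-rank-%03d.tensors` convention used by this example script.
    base_path = "s3://example-bucket/vllm/facebook/opt-125m/abc123"
    template = f"{base_path}/model-rank-%03d.tensors"

    tensor_parallel_size = 4
    shard_paths = [template % rank for rank in range(tensor_parallel_size)]
    # -> .../model-rank-000.tensors, .../model-rank-001.tensors, ... up to 003
    print("\n".join(shard_paths))
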
@@ -168,77 +168,72 @@ def parse_args(): def deserialize(): llm = LLM(model=args.model, load_format="tensorizer", + tensor_parallel_size=args.tensor_parallel_size, model_loader_extra_config=tensorizer_config ) return llm +if __name__ == '__main__': + args = parse_args() -args = parse_args() - -s3_access_key_id = (getattr(args, 's3_access_key_id', None) - or os.environ.get("S3_ACCESS_KEY_ID", None)) -s3_secret_access_key = (getattr(args, 's3_secret_access_key', None) - or os.environ.get("S3_SECRET_ACCESS_KEY", None)) -s3_endpoint = (getattr(args, 's3_endpoint', None) - or os.environ.get("S3_ENDPOINT_URL", None)) - -credentials = { - "s3_access_key_id": s3_access_key_id, - "s3_secret_access_key": s3_secret_access_key, - "s3_endpoint": s3_endpoint -} + s3_access_key_id = (getattr(args, 's3_access_key_id', None) + or os.environ.get("S3_ACCESS_KEY_ID", None)) + s3_secret_access_key = (getattr(args, 's3_secret_access_key', None) + or os.environ.get("S3_SECRET_ACCESS_KEY", None)) + s3_endpoint = (getattr(args, 's3_endpoint', None) + or os.environ.get("S3_ENDPOINT_URL", None)) -_read_stream, _write_stream = (partial( - stream_io.open_stream, - mode=mode, - s3_access_key_id=s3_access_key_id, - s3_secret_access_key=s3_secret_access_key, - s3_endpoint=s3_endpoint, -) for mode in ("rb", "wb+")) + credentials = { + "s3_access_key_id": s3_access_key_id, + "s3_secret_access_key": s3_secret_access_key, + "s3_endpoint": s3_endpoint + } -model_ref = args.model + model_ref = args.model -model_name = model_ref.split("/")[1] + model_name = model_ref.split("/")[1] -os.environ["MASTER_ADDR"] = "127.0.0.1" -os.environ["MASTER_PORT"] = "8080" + keyfile = args.keyfile if args.keyfile else None -init_distributed_environment(world_size=1, rank=0, local_rank=0) -initialize_model_parallel() + if args.model_loader_extra_config: + config = json.loads(args.model_loader_extra_config) + tensorizer_args = \ + TensorizerConfig(**config)._construct_tensorizer_args() + tensorizer_args.tensorizer_uri = args.path_to_tensors + else: + tensorizer_args = None -keyfile = args.keyfile if args.keyfile else None + if args.command == "serialize": + eng_args_dict = {f.name: getattr(args, f.name) for f in + dataclasses.fields(EngineArgs)} + engine_args = EngineArgs.from_cli_args( + argparse.Namespace(**eng_args_dict) + ) -if args.model_loader_extra_config: - config = json.loads(args.model_loader_extra_config) - tensorizer_args = TensorizerConfig(**config)._construct_tensorizer_args() - tensorizer_args.tensorizer_uri = args.path_to_tensors -else: - tensorizer_args = None - -if args.command == "serialize": - eng_args_dict = {f.name: getattr(args, f.name) for f in - dataclasses.fields(EngineArgs)} - - engine_args = EngineArgs.from_cli_args(argparse.Namespace(**eng_args_dict)) - engine = LLMEngine.from_engine_args(engine_args) + input_dir = args.serialized_directory.rstrip('/') + suffix = args.suffix if args.suffix else uuid.uuid4().hex + base_path = f"{input_dir}/vllm/{model_ref}/{suffix}" + if engine_args.tensor_parallel_size > 1: + model_path = f"{base_path}/model-rank-%03d.tensors" + else: + model_path = f"{base_path}/model.tensors" - input_dir = args.serialized_directory.rstrip('/') - suffix = args.suffix if args.suffix else uuid.uuid4().hex - base_path = f"{input_dir}/vllm/{model_ref}/{suffix}" - model_path = f"{base_path}/model.tensors" - tensorizer_config = TensorizerConfig( - tensorizer_uri=model_path, - **credentials) - serialize_vllm_model(engine, tensorizer_config, keyfile) -elif args.command == "deserialize": - if not 
tensorizer_args: tensorizer_config = TensorizerConfig( - tensorizer_uri=args.path_to_tensors, - encryption_keyfile = keyfile, - **credentials - ) - deserialize() -else: - raise ValueError("Either serialize or deserialize must be specified.") + tensorizer_uri=model_path, + encryption_keyfile=keyfile, + **credentials) + + tensorize_vllm_model(engine_args, tensorizer_config) + + elif args.command == "deserialize": + if not tensorizer_args: + tensorizer_config = TensorizerConfig( + tensorizer_uri=args.path_to_tensors, + encryption_keyfile = keyfile, + **credentials + ) + deserialize() + else: + raise ValueError("Either serialize or deserialize must be specified.") diff --git a/format.sh b/format.sh index 8cdb46a41fc63..784d8d565e83a 100755 --- a/format.sh +++ b/format.sh @@ -36,12 +36,12 @@ tool_version_check() { fi } -tool_version_check "yapf" $YAPF_VERSION "$(grep yapf requirements-dev.txt | cut -d'=' -f3)" -tool_version_check "ruff" $RUFF_VERSION "$(grep "ruff==" requirements-dev.txt | cut -d'=' -f3)" -tool_version_check "mypy" "$MYPY_VERSION" "$(grep mypy requirements-dev.txt | cut -d'=' -f3)" -tool_version_check "isort" "$ISORT_VERSION" "$(grep isort requirements-dev.txt | cut -d'=' -f3)" -tool_version_check "codespell" "$CODESPELL_VERSION" "$(grep codespell requirements-dev.txt | cut -d'=' -f3)" -tool_version_check "clang-format" "$CLANGFORMAT_VERSION" "$(grep clang-format requirements-dev.txt | cut -d'=' -f3)" +tool_version_check "yapf" $YAPF_VERSION "$(grep yapf requirements-lint.txt | cut -d'=' -f3)" +tool_version_check "ruff" $RUFF_VERSION "$(grep "ruff==" requirements-lint.txt | cut -d'=' -f3)" +tool_version_check "mypy" "$MYPY_VERSION" "$(grep mypy requirements-lint.txt | cut -d'=' -f3)" +tool_version_check "isort" "$ISORT_VERSION" "$(grep isort requirements-lint.txt | cut -d'=' -f3)" +tool_version_check "codespell" "$CODESPELL_VERSION" "$(grep codespell requirements-lint.txt | cut -d'=' -f3)" +tool_version_check "clang-format" "$CLANGFORMAT_VERSION" "$(grep clang-format requirements-lint.txt | cut -d'=' -f3)" YAPF_FLAGS=( '--recursive' diff --git a/neuralmagic/tests/test_skip_env_vars/full.txt b/neuralmagic/tests/test_skip_env_vars/full.txt index 9c6f69cacd225..1ff4589556327 100644 --- a/neuralmagic/tests/test_skip_env_vars/full.txt +++ b/neuralmagic/tests/test_skip_env_vars/full.txt @@ -4,7 +4,7 @@ TEST_BASIC_CORRECTNESS=ENABLE TEST_CORE=ENABLE TEST_DISTRIBUTED=DISABLE TEST_ENGINE=ENABLE -TEST_ENTRYPOINTS=ENABLE +TEST_ENTRYPOINTS=DISABLE TEST_KERNELS=ENABLE TEST_LORA=ENABLE TEST_METRICS=ENABLE diff --git a/requirements-common.txt b/requirements-common.txt index bf9987e3af014..32e2ebe8c6159 100644 --- a/requirements-common.txt +++ b/requirements-common.txt @@ -2,7 +2,7 @@ cmake >= 3.21 ninja # For faster builds. psutil sentencepiece # Required for LLaMA tokenizer. -numpy +numpy < 2.0.0 requests py-cpuinfo transformers >= 4.40.0 # Required for StarCoder2 & Llava, Llama 3. diff --git a/requirements-cpu.txt b/requirements-cpu.txt index b739642d8d344..8b7d86e686217 100644 --- a/requirements-cpu.txt +++ b/requirements-cpu.txt @@ -2,5 +2,5 @@ -r requirements-common.txt # Dependencies for x86_64 CPUs -torch == 2.3.0+cpu +torch == 2.3.1+cpu triton >= 2.2.0 # FIXME(woosuk): This is a hack to avoid import error. 
\ No newline at end of file diff --git a/requirements-dev.txt b/requirements-dev.txt index 587387a3d582a..421aa2e796ec5 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,41 +1,5 @@ -# formatting -yapf==0.32.0 -toml==0.10.2 -tomli==2.0.1 -ruff==0.1.5 -codespell==2.2.6 -isort==5.13.2 -clang-format==18.1.5 +-r requirements-lint.txt +-r requirements-test.txt -# type checking -mypy==1.9.0 -types-PyYAML -types-requests==2.31.0.2 -types-setuptools - -# testing -autoawq -datasets -pytest -tensorizer>=2.9.0 -pytest-forked -pytest-asyncio -pytest-rerunfailures -pytest-shard - -# testing utils -awscli -einops # required for MPT -httpx -peft -requests==2.31 -ray -sentence-transformers # required for embedding -optimum # required for hf gptq baselines -auto-gptq # required for hf gptq baselines - -# Benchmarking -aiohttp - -# quantization -bitsandbytes==0.42.0 +# Avoid adding requirements directly to this file. +# Instead, modify the two files referenced above. diff --git a/requirements-lint.txt b/requirements-lint.txt new file mode 100644 index 0000000000000..0f0300ec0a5c6 --- /dev/null +++ b/requirements-lint.txt @@ -0,0 +1,14 @@ +# formatting +yapf==0.32.0 +toml==0.10.2 +tomli==2.0.1 +ruff==0.1.5 +codespell==2.3.0 +isort==5.13.2 +clang-format==18.1.5 + +# type checking +mypy==1.9.0 +types-PyYAML +types-requests==2.31.0.2 +types-setuptools diff --git a/requirements-test.txt b/requirements-test.txt new file mode 100644 index 0000000000000..f92975caa61fc --- /dev/null +++ b/requirements-test.txt @@ -0,0 +1,24 @@ +# testing +pytest +tensorizer>=2.9.0 +pytest-forked +pytest-asyncio +pytest-rerunfailures +pytest-shard + +# testing utils +awscli +einops # required for MPT +httpx +peft +requests==2.31 # required for python 3.8 testing +ray +sentence-transformers # required for embedding +optimum # required for hf gptq baselines +auto-gptq # required for hf gptq baselines + +# Benchmarking +aiohttp + +# quantization +bitsandbytes==0.42.0 diff --git a/requirements-tpu.txt b/requirements-tpu.txt new file mode 100644 index 0000000000000..22487f5524dd7 --- /dev/null +++ b/requirements-tpu.txt @@ -0,0 +1,7 @@ +# Common dependencies +-r requirements-common.txt + +# Dependencies for TPU +# Currently, the TPU backend uses a nightly version of PyTorch XLA. +# You can install the dependencies in Dockerfile.tpu. +triton # To avoid import errors diff --git a/setup.py b/setup.py index 61ed64d7791cd..22d33898b342d 100644 --- a/setup.py +++ b/setup.py @@ -141,6 +141,7 @@ def configure(self, ext: CMakeExtension) -> None: cmake_args += [ '-DCMAKE_CXX_COMPILER_LAUNCHER=sccache', '-DCMAKE_CUDA_COMPILER_LAUNCHER=sccache', + '-DCMAKE_C_COMPILER_LAUNCHER=sccache', ] elif is_ccache_available(): cmake_args += [ @@ -172,7 +173,6 @@ def configure(self, ext: CMakeExtension) -> None: else: # Default build tool to whatever cmake picks. 
build_tool = [] - subprocess.check_call( ['cmake', ext.cmake_lists_dir, *build_tool, *cmake_args], cwd=self.build_temp) @@ -207,9 +207,9 @@ def build_extensions(self) -> None: def _is_cuda() -> bool: - return VLLM_TARGET_DEVICE == "cuda" \ - and torch.version.cuda is not None \ - and not _is_neuron() + has_cuda = torch.version.cuda is not None + return (VLLM_TARGET_DEVICE == "cuda" and has_cuda + and not (_is_neuron() or _is_tpu())) def _is_hip() -> bool: @@ -226,10 +226,18 @@ def _is_neuron() -> bool: return torch_neuronx_installed or VLLM_TARGET_DEVICE == "neuron" +def _is_tpu() -> bool: + return VLLM_TARGET_DEVICE == "tpu" + + def _is_cpu() -> bool: return VLLM_TARGET_DEVICE == "cpu" +def _build_custom_ops() -> bool: + return _is_cuda() or _is_hip() or _is_cpu() + + def _install_punica() -> bool: return envs.VLLM_INSTALL_PUNICA_KERNELS @@ -321,7 +329,7 @@ def get_nm_vllm_package_name() -> str: def get_vllm_version() -> str: - version = find_version(get_path("vllm", "__init__.py")) + version = find_version(get_path("vllm", "version.py")) nm_release_type = os.getenv(NM_RELEASE_TYPE) if nm_release_type != 'RELEASE': @@ -345,6 +353,8 @@ def get_vllm_version() -> str: if neuron_version != MAIN_CUDA_VERSION: neuron_version_str = neuron_version.replace(".", "")[:3] version += f"+neuron{neuron_version_str}" + elif _is_tpu(): + version += "+tpu" elif _is_cpu(): version += "+cpu" else: @@ -392,6 +402,8 @@ def _read_requirements(filename: str) -> List[str]: requirements = _read_requirements("requirements-rocm.txt") elif _is_neuron(): requirements = _read_requirements("requirements-neuron.txt") + elif _is_tpu(): + requirements = _read_requirements("requirements-tpu.txt") elif _is_cpu(): requirements = _read_requirements("requirements-cpu.txt") else: @@ -405,7 +417,7 @@ def _read_requirements(filename: str) -> List[str]: if _is_cuda() or _is_hip(): ext_modules.append(CMakeExtension(name="vllm._moe_C")) -if not _is_neuron(): +if _build_custom_ops(): ext_modules.append(CMakeExtension(name="vllm._C")) if _install_punica(): @@ -468,6 +480,6 @@ def _read_requirements(filename: str) -> List[str]: "sparse": _sparsity_deps, "sparsity": _sparsity_deps, }, - cmdclass={"build_ext": cmake_build_ext} if not _is_neuron() else {}, + cmdclass={"build_ext": cmake_build_ext} if _build_custom_ops() else {}, package_data=package_data, ) diff --git a/tests/async_engine/test_openapi_server_ray.py b/tests/async_engine/test_openapi_server_ray.py index 60c45388c53a8..c5e5a42bba8e5 100644 --- a/tests/async_engine/test_openapi_server_ray.py +++ b/tests/async_engine/test_openapi_server_ray.py @@ -5,20 +5,34 @@ import ray from tests.nm_utils.utils_skip import should_skip_test_group -from tests.utils import ServerRunner +from tests.utils import VLLM_PATH, RemoteOpenAIServer if should_skip_test_group(group_name="TEST_ASYNC_ENGINE"): pytest.skip("TEST_ASYNC_ENGINE=DISABLE, skipping async engine test group", allow_module_level=True) +# TODO: @robertgshaw2 +# Remove this once we land the single whl - failing due to size of workspace +# being moved into ray. 
+if should_skip_test_group(group_name="TEST_ENTRYPOINTS"): + pytest.skip( + "TEST_ENTRYPOINTS=DISABLE, skipping entrypoints engine test group", + allow_module_level=True) + # any model with a chat template should work here MODEL_NAME = "facebook/opt-125m" @pytest.fixture(scope="module") -def server(): - ray.init() - server_runner = ServerRunner.remote([ +def ray_ctx(): + ray.init(runtime_env={"working_dir": VLLM_PATH}) + yield + ray.shutdown() + + +@pytest.fixture(scope="module") +def server(ray_ctx): + return RemoteOpenAIServer([ "--model", MODEL_NAME, # use half precision for speed and memory savings in CI environment @@ -29,22 +43,15 @@ def server(): "--enforce-eager", "--engine-use-ray" ]) - ray.get(server_runner.ready.remote()) - yield server_runner - ray.shutdown() @pytest.fixture(scope="module") -def client(): - client = openai.AsyncOpenAI( - base_url="http://localhost:8000/v1", - api_key="token-abc123", - ) - yield client +def client(server): + return server.get_async_client() @pytest.mark.asyncio -async def test_check_models(server, client: openai.AsyncOpenAI): +async def test_check_models(client: openai.AsyncOpenAI): models = await client.models.list() models = models.data served_model = models[0] @@ -53,7 +60,7 @@ async def test_check_models(server, client: openai.AsyncOpenAI): @pytest.mark.asyncio -async def test_single_completion(server, client: openai.AsyncOpenAI): +async def test_single_completion(client: openai.AsyncOpenAI): completion = await client.completions.create(model=MODEL_NAME, prompt="Hello, my name is", max_tokens=5, @@ -77,7 +84,7 @@ async def test_single_completion(server, client: openai.AsyncOpenAI): @pytest.mark.asyncio -async def test_single_chat_session(server, client: openai.AsyncOpenAI): +async def test_single_chat_session(client: openai.AsyncOpenAI): messages = [{ "role": "system", "content": "you are a helpful assistant" diff --git a/tests/conftest.py b/tests/conftest.py index 31c6f12f5c030..2c72094f5005a 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -2,8 +2,6 @@ import gc import logging import os -import subprocess -import sys from typing import Any, Dict, List, Optional, Tuple, TypeVar import pytest @@ -17,13 +15,14 @@ from tests.nm_utils.logging import make_logger from vllm import LLM, SamplingParams from vllm.config import TokenizerPoolConfig, VisionLanguageConfig -from vllm.distributed import destroy_model_parallel +from vllm.distributed import (destroy_distributed_environment, + destroy_model_parallel) from vllm.inputs import TextPrompt from vllm.logger import init_logger from vllm.multimodal import MultiModalData from vllm.multimodal.image import ImageFeatureData, ImagePixelData from vllm.sequence import SampleLogprobs -from vllm.utils import is_cpu +from vllm.utils import cuda_device_count_stateless, is_cpu logger = init_logger(__name__) @@ -56,6 +55,7 @@ def _read_prompts(filename: str) -> List[str]: def cleanup(): destroy_model_parallel() + destroy_distributed_environment() with contextlib.suppress(AssertionError): torch.distributed.destroy_process_group() gc.collect() @@ -767,18 +767,7 @@ def num_gpus_available(): """Get number of GPUs without initializing the CUDA context in current process.""" - try: - out = subprocess.run([ - sys.executable, "-c", - "import torch; print(torch.cuda.device_count())" - ], - capture_output=True, - check=True, - text=True) - except subprocess.CalledProcessError as e: - logger.warning("Failed to get number of GPUs.", exc_info=e) - return 0 - return int(out.stdout.strip()) + return 
cuda_device_count_stateless() @pytest.fixture(scope="session") diff --git a/tests/core/test_chunked_prefill_scheduler.py b/tests/core/test_chunked_prefill_scheduler.py index 8fce7b4364298..5137f1644194e 100644 --- a/tests/core/test_chunked_prefill_scheduler.py +++ b/tests/core/test_chunked_prefill_scheduler.py @@ -154,7 +154,7 @@ def test_complex(): # Only the first seq group has a new token appended. append_new_token(running[0], 1) - # Add 2 more requsets. + # Add 2 more requests. for i in range(2, 4): _, seq_group = create_dummy_prompt(str(i), prompt_length=60) scheduler.add_seq_group(seq_group) diff --git a/tests/distributed/test_basic_distributed_correctness.py b/tests/distributed/test_basic_distributed_correctness.py index aeb4057ebe34e..0734a70a11f97 100644 --- a/tests/distributed/test_basic_distributed_correctness.py +++ b/tests/distributed/test_basic_distributed_correctness.py @@ -34,6 +34,9 @@ VLLM_ATTENTION_BACKEND = "VLLM_ATTENTION_BACKEND" +@pytest.mark.skip("Upstream test that compares 'golden' results from fp16 " + "model with TP, which is an invalid test strategy due to " + "numerical precision on GPU.") @pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Need at least 2 GPUs to run the test.") @pytest.mark.parametrize("model", MODELS) diff --git a/tests/distributed/test_chunked_prefill_distributed.py b/tests/distributed/test_chunked_prefill_distributed.py index c8d1147238f52..5e931284f1c4f 100644 --- a/tests/distributed/test_chunked_prefill_distributed.py +++ b/tests/distributed/test_chunked_prefill_distributed.py @@ -32,6 +32,9 @@ DISTRIBUTED_EXECUTOR_BACKEND = "DISTRIBUTED_EXECUTOR_BACKEND" +@pytest.mark.skip("Upstream test that compares 'golden' results from fp16 " + "model with TP, which is an invalid test strategy due to " + "numerical precision on GPU.") @pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Need at least 2 GPUs to run the test.") @pytest.mark.parametrize("model", MODELS) diff --git a/tests/distributed/test_custom_all_reduce.py b/tests/distributed/test_custom_all_reduce.py index 5f77ed7539979..6a99050316588 100644 --- a/tests/distributed/test_custom_all_reduce.py +++ b/tests/distributed/test_custom_all_reduce.py @@ -10,9 +10,9 @@ from tests.utils import (init_test_distributed_environment, multi_process_tensor_parallel) from vllm.distributed.communication_op import ( # noqa - graph_capture, tensor_model_parallel_all_reduce) + tensor_model_parallel_all_reduce) from vllm.distributed.parallel_state import (get_tensor_model_parallel_group, - get_tp_ca_communicator) + get_tp_group, graph_capture) if should_skip_test_group(group_name="TEST_DISTRIBUTED"): pytest.skip("TEST_DISTRIBUTED=DISABLE, skipping distributed test group", @@ -95,7 +95,7 @@ def eager_allreduce(tp_size, pp_size, rank, distributed_init_port): # communicate independently num_communication = rank // tp_size + 1 sz = 1024 - fa = get_tp_ca_communicator() + fa = get_tp_group().ca_comm inp = torch.ones(sz, dtype=torch.float32, device=device) out = inp for _ in range(num_communication): diff --git a/tests/distributed/test_pynccl.py b/tests/distributed/test_pynccl.py index b5dca7f8a82fd..27bcd88eadc61 100644 --- a/tests/distributed/test_pynccl.py +++ b/tests/distributed/test_pynccl.py @@ -7,10 +7,11 @@ from tests.nm_utils.utils_skip import should_skip_test_group from vllm.distributed.communication_op import ( # noqa - graph_capture, tensor_model_parallel_all_reduce) + tensor_model_parallel_all_reduce) from vllm.distributed.device_communicators.pynccl import PyNcclCommunicator from 
vllm.distributed.device_communicators.pynccl_wrapper import NCCLLibrary from vllm.distributed.parallel_state import (ensure_model_parallel_initialized, + get_world_group, graph_capture, init_distributed_environment) from vllm.utils import update_environment_variables @@ -58,7 +59,8 @@ def wrapped_fn(env): @worker_fn_wrapper def worker_fn(): - pynccl_comm = PyNcclCommunicator() + pynccl_comm = PyNcclCommunicator(get_world_group().cpu_group, + device=get_world_group().device) tensor = torch.ones(16, 1024, 1024, dtype=torch.float32).cuda(pynccl_comm.rank) with pynccl_comm.change_state(enable=True): @@ -134,7 +136,8 @@ def test_pynccl_multiple_allreduce_with_vllm(): def worker_fn_with_cudagraph(): with torch.no_grad(): graph = torch.cuda.CUDAGraph() - pynccl_comm = PyNcclCommunicator() + pynccl_comm = PyNcclCommunicator(get_world_group().cpu_group, + device=get_world_group().device) # run something in the default stream to initialize torch engine a = torch.ones((4, 4), device=f'cuda:{pynccl_comm.rank}') torch.cuda.synchronize() @@ -159,7 +162,8 @@ def test_pynccl_with_cudagraph(): @worker_fn_wrapper def send_recv_worker_fn(): - pynccl_comm = PyNcclCommunicator() + pynccl_comm = PyNcclCommunicator(get_world_group().cpu_group, + device=get_world_group().device) if pynccl_comm.rank == 0: tensor = torch.ones(16, 1024, 1024, dtype=torch.float32).cuda(pynccl_comm.rank) diff --git a/tests/distributed/test_same_node.py b/tests/distributed/test_same_node.py index 4880bab79069c..d0517be3d102a 100644 --- a/tests/distributed/test_same_node.py +++ b/tests/distributed/test_same_node.py @@ -1,3 +1,8 @@ +# UPSTREAM SYNC: +# Since this test is launched with torchrun, pytest.skip +# an importing from test directory is having trouble. +# So, we can have the should_skip_test_group logic here. 
+ import os import torch diff --git a/tests/distributed/test_utils.py b/tests/distributed/test_utils.py new file mode 100644 index 0000000000000..6746ffe97bafa --- /dev/null +++ b/tests/distributed/test_utils.py @@ -0,0 +1,37 @@ +import os + +import pytest +import ray + +from tests.nm_utils.utils_skip import should_skip_test_group +from vllm.utils import cuda_device_count_stateless + +if should_skip_test_group(group_name="TEST_DISTRIBUTED"): + pytest.skip("TEST_DISTRIBUTED=DISABLE, skipping distributed test group", + allow_module_level=True) + + +@ray.remote +class _CUDADeviceCountStatelessTestActor(): + + def get_count(self): + return cuda_device_count_stateless() + + def set_cuda_visible_devices(self, cuda_visible_devices: str): + os.environ["CUDA_VISIBLE_DEVICES"] = cuda_visible_devices + + def get_cuda_visible_devices(self): + return os.environ["CUDA_VISIBLE_DEVICES"] + + +def test_cuda_device_count_stateless(): + """Test that cuda_device_count_stateless changes return value if + CUDA_VISIBLE_DEVICES is changed.""" + + actor = _CUDADeviceCountStatelessTestActor.options(num_gpus=2).remote() + assert ray.get(actor.get_cuda_visible_devices.remote()) == "0,1" + assert ray.get(actor.get_count.remote()) == 2 + ray.get(actor.set_cuda_visible_devices.remote("0")) + assert ray.get(actor.get_count.remote()) == 1 + ray.get(actor.set_cuda_visible_devices.remote("")) + assert ray.get(actor.get_count.remote()) == 0 diff --git a/tests/entrypoints/test_llm_generate_multiple_loras.py b/tests/entrypoints/test_llm_generate_multiple_loras.py index b429b904c7c35..8401e9836f1ac 100644 --- a/tests/entrypoints/test_llm_generate_multiple_loras.py +++ b/tests/entrypoints/test_llm_generate_multiple_loras.py @@ -4,11 +4,16 @@ # downloading lora to test lora requests from huggingface_hub import snapshot_download +from tests.nm_utils.utils_skip import should_skip_test_group from vllm import LLM from vllm.lora.request import LoRARequest from ..conftest import cleanup +if should_skip_test_group(group_name="TEST_ENTRYPOINTS"): + pytest.skip("TEST_ENTRYPOINTS=DISABLE, skipping entrypoints group", + allow_module_level=True) + MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta" PROMPTS = [ diff --git a/tests/entrypoints/test_openai_embedding.py b/tests/entrypoints/test_openai_embedding.py new file mode 100644 index 0000000000000..8ace215c7fe71 --- /dev/null +++ b/tests/entrypoints/test_openai_embedding.py @@ -0,0 +1,119 @@ +import openai +import pytest +import ray + +from tests.nm_utils.utils_skip import should_skip_test_group + +from ..utils import VLLM_PATH, RemoteOpenAIServer + +if should_skip_test_group(group_name="TEST_ENTRYPOINTS"): + pytest.skip("TEST_ENTRYPOINTS=DISABLE, skipping entrypoints group", + allow_module_level=True) + +EMBEDDING_MODEL_NAME = "intfloat/e5-mistral-7b-instruct" + +pytestmark = pytest.mark.openai + + +@pytest.fixture(scope="module") +def ray_ctx(): + ray.init(runtime_env={"working_dir": VLLM_PATH}) + yield + ray.shutdown() + + +@pytest.fixture(scope="module") +def embedding_server(ray_ctx): + return RemoteOpenAIServer([ + "--model", + EMBEDDING_MODEL_NAME, + # use half precision for speed and memory savings in CI environment + "--dtype", + "bfloat16", + "--enforce-eager", + "--max-model-len", + "8192", + "--enforce-eager", + ]) + + +@pytest.mark.asyncio +@pytest.fixture(scope="module") +def embedding_client(embedding_server): + return embedding_server.get_async_client() + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "model_name", + [EMBEDDING_MODEL_NAME], +) +async def 
test_single_embedding(embedding_client: openai.AsyncOpenAI, + model_name: str): + input_texts = [ + "The chef prepared a delicious meal.", + ] + + # test single embedding + embeddings = await embedding_client.embeddings.create( + model=model_name, + input=input_texts, + encoding_format="float", + ) + assert embeddings.id is not None + assert len(embeddings.data) == 1 + assert len(embeddings.data[0].embedding) == 4096 + assert embeddings.usage.completion_tokens == 0 + assert embeddings.usage.prompt_tokens == 9 + assert embeddings.usage.total_tokens == 9 + + # test using token IDs + input_tokens = [1, 1, 1, 1, 1] + embeddings = await embedding_client.embeddings.create( + model=model_name, + input=input_tokens, + encoding_format="float", + ) + assert embeddings.id is not None + assert len(embeddings.data) == 1 + assert len(embeddings.data[0].embedding) == 4096 + assert embeddings.usage.completion_tokens == 0 + assert embeddings.usage.prompt_tokens == 5 + assert embeddings.usage.total_tokens == 5 + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "model_name", + [EMBEDDING_MODEL_NAME], +) +async def test_batch_embedding(embedding_client: openai.AsyncOpenAI, + model_name: str): + # test List[str] + input_texts = [ + "The cat sat on the mat.", "A feline was resting on a rug.", + "Stars twinkle brightly in the night sky." + ] + embeddings = await embedding_client.embeddings.create( + model=model_name, + input=input_texts, + encoding_format="float", + ) + assert embeddings.id is not None + assert len(embeddings.data) == 3 + assert len(embeddings.data[0].embedding) == 4096 + + # test List[List[int]] + input_tokens = [[4, 5, 7, 9, 20], [15, 29, 499], [24, 24, 24, 24, 24], + [25, 32, 64, 77]] + embeddings = await embedding_client.embeddings.create( + model=model_name, + input=input_tokens, + encoding_format="float", + ) + assert embeddings.id is not None + assert len(embeddings.data) == 4 + assert len(embeddings.data[0].embedding) == 4096 + assert embeddings.usage.completion_tokens == 0 + assert embeddings.usage.prompt_tokens == 17 + assert embeddings.usage.total_tokens == 17 diff --git a/tests/entrypoints/test_openai_server.py b/tests/entrypoints/test_openai_server.py index d23705e46be70..58466e6bdd363 100644 --- a/tests/entrypoints/test_openai_server.py +++ b/tests/entrypoints/test_openai_server.py @@ -14,7 +14,7 @@ from openai import BadRequestError from tests.nm_utils.utils_skip import should_skip_test_group -from tests.utils import ServerRunner +from tests.utils import VLLM_PATH, RemoteOpenAIServer from vllm.transformers_utils.tokenizer import get_tokenizer if should_skip_test_group(group_name="TEST_ENTRYPOINTS"): @@ -23,7 +23,6 @@ # any model with a chat template should work here MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta" -EMBEDDING_MODEL_NAME = "intfloat/e5-mistral-7b-instruct" # technically this needs Mistral-7B-v0.1 as base, but we're not testing # generation quality here LORA_NAME = "typeof/zephyr-7b-beta-lora" @@ -84,9 +83,15 @@ def zephyr_lora_files(): @pytest.fixture(scope="module") -def server(zephyr_lora_files): - ray.init(ignore_reinit_error=True) - server_runner = ServerRunner.remote([ +def ray_ctx(): + ray.init(runtime_env={"working_dir": VLLM_PATH}) + yield + ray.shutdown() + + +@pytest.fixture(scope="module") +def server(zephyr_lora_files, ray_ctx): + return RemoteOpenAIServer([ "--model", MODEL_NAME, # use half precision for speed and memory savings in CI environment @@ -95,8 +100,6 @@ def server(zephyr_lora_files): "--max-model-len", "8192", "--enforce-eager", - 
"--gpu-memory-utilization", - "0.75", # lora config below "--enable-lora", "--lora-modules", @@ -109,43 +112,14 @@ def server(zephyr_lora_files): "--max-num-seqs", "128", ]) - ray.get(server_runner.ready.remote()) - yield server_runner - ray.shutdown() @pytest.fixture(scope="module") -def embedding_server(zephyr_lora_files): - ray.shutdown() - ray.init() - server_runner = ServerRunner.remote([ - "--model", - EMBEDDING_MODEL_NAME, - # use half precision for speed and memory savings in CI environment - "--dtype", - "bfloat16", - "--enforce-eager", - "--gpu-memory-utilization", - "0.75", - "--max-model-len", - "8192", - ]) - ray.get(server_runner.ready.remote()) - yield server_runner - ray.shutdown() - - -@pytest.fixture(scope="module") -def client(): - client = openai.AsyncOpenAI( - base_url="http://localhost:8000/v1", - api_key="token-abc123", - ) - yield client +def client(server): + return server.get_async_client() -@pytest.mark.asyncio -async def test_check_models(server, client: openai.AsyncOpenAI): +async def test_check_models(client: openai.AsyncOpenAI): models = await client.models.list() models = models.data served_model = models[0] @@ -162,8 +136,7 @@ async def test_check_models(server, client: openai.AsyncOpenAI): "model_name", [MODEL_NAME, "zephyr-lora", "zephyr-lora2"], ) -async def test_single_completion(server, client: openai.AsyncOpenAI, - model_name: str): +async def test_single_completion(client: openai.AsyncOpenAI, model_name: str): completion = await client.completions.create(model=model_name, prompt="Hello, my name is", max_tokens=5, @@ -194,8 +167,7 @@ async def test_single_completion(server, client: openai.AsyncOpenAI, "model_name", [MODEL_NAME, "zephyr-lora", "zephyr-lora2"], ) -async def test_no_logprobs(server, client: openai.AsyncOpenAI, - model_name: str): +async def test_no_logprobs(client: openai.AsyncOpenAI, model_name: str): # test using token IDs completion = await client.completions.create( model=MODEL_NAME, @@ -214,8 +186,7 @@ async def test_no_logprobs(server, client: openai.AsyncOpenAI, "model_name", [MODEL_NAME, "zephyr-lora"], ) -async def test_zero_logprobs(server, client: openai.AsyncOpenAI, - model_name: str): +async def test_zero_logprobs(client: openai.AsyncOpenAI, model_name: str): # test using token IDs completion = await client.completions.create( model=MODEL_NAME, @@ -236,8 +207,7 @@ async def test_zero_logprobs(server, client: openai.AsyncOpenAI, "model_name", [MODEL_NAME, "zephyr-lora"], ) -async def test_some_logprobs(server, client: openai.AsyncOpenAI, - model_name: str): +async def test_some_logprobs(client: openai.AsyncOpenAI, model_name: str): # test using token IDs completion = await client.completions.create( model=MODEL_NAME, @@ -258,7 +228,7 @@ async def test_some_logprobs(server, client: openai.AsyncOpenAI, "model_name", [MODEL_NAME, "zephyr-lora"], ) -async def test_too_many_completion_logprobs(server, client: openai.AsyncOpenAI, +async def test_too_many_completion_logprobs(client: openai.AsyncOpenAI, model_name: str): with pytest.raises( @@ -304,8 +274,7 @@ async def test_too_many_completion_logprobs(server, client: openai.AsyncOpenAI, "model_name", [MODEL_NAME, "zephyr-lora", "zephyr-lora2"], ) -async def test_no_logprobs_chat(server, client: openai.AsyncOpenAI, - model_name: str): +async def test_no_logprobs_chat(client: openai.AsyncOpenAI, model_name: str): messages = [{ "role": "system", "content": "you are a helpful assistant" @@ -330,8 +299,7 @@ async def test_no_logprobs_chat(server, client: openai.AsyncOpenAI, 
"model_name", [MODEL_NAME, "zephyr-lora"], ) -async def test_zero_logprobs_chat(server, client: openai.AsyncOpenAI, - model_name: str): +async def test_zero_logprobs_chat(client: openai.AsyncOpenAI, model_name: str): messages = [{ "role": "system", "content": "you are a helpful assistant" @@ -358,8 +326,7 @@ async def test_zero_logprobs_chat(server, client: openai.AsyncOpenAI, "model_name", [MODEL_NAME, "zephyr-lora"], ) -async def test_some_logprobs_chat(server, client: openai.AsyncOpenAI, - model_name: str): +async def test_some_logprobs_chat(client: openai.AsyncOpenAI, model_name: str): messages = [{ "role": "system", "content": "you are a helpful assistant" @@ -386,7 +353,7 @@ async def test_some_logprobs_chat(server, client: openai.AsyncOpenAI, "model_name", [MODEL_NAME, "zephyr-lora"], ) -async def test_too_many_chat_logprobs(server, client: openai.AsyncOpenAI, +async def test_too_many_chat_logprobs(client: openai.AsyncOpenAI, model_name: str): messages = [{ "role": "system", @@ -429,7 +396,7 @@ async def test_too_many_chat_logprobs(server, client: openai.AsyncOpenAI, "model_name", [MODEL_NAME, "zephyr-lora"], ) -async def test_single_chat_session(server, client: openai.AsyncOpenAI, +async def test_single_chat_session(client: openai.AsyncOpenAI, model_name: str): messages = [{ "role": "system", @@ -474,7 +441,7 @@ async def test_single_chat_session(server, client: openai.AsyncOpenAI, "model_name", [MODEL_NAME, "zephyr-lora"], ) -async def test_completion_streaming(server, client: openai.AsyncOpenAI, +async def test_completion_streaming(client: openai.AsyncOpenAI, model_name: str): prompt = "What is an LLM?" @@ -509,8 +476,7 @@ async def test_completion_streaming(server, client: openai.AsyncOpenAI, "model_name", [MODEL_NAME, "zephyr-lora"], ) -async def test_chat_streaming(server, client: openai.AsyncOpenAI, - model_name: str): +async def test_chat_streaming(client: openai.AsyncOpenAI, model_name: str): messages = [{ "role": "system", "content": "you are a helpful assistant" @@ -559,8 +525,7 @@ async def test_chat_streaming(server, client: openai.AsyncOpenAI, "model_name", ["HuggingFaceH4/zephyr-7b-beta", "zephyr-lora"], ) -async def test_chat_completion_stream_options(server, - client: openai.AsyncOpenAI, +async def test_chat_completion_stream_options(client: openai.AsyncOpenAI, model_name: str): messages = [{ "role": "system", @@ -630,7 +595,7 @@ async def test_chat_completion_stream_options(server, "model_name", ["HuggingFaceH4/zephyr-7b-beta", "zephyr-lora"], ) -async def test_completion_stream_options(server, client: openai.AsyncOpenAI, +async def test_completion_stream_options(client: openai.AsyncOpenAI, model_name: str): prompt = "What is the capital of France?" 
@@ -692,8 +657,7 @@ async def test_completion_stream_options(server, client: openai.AsyncOpenAI, "model_name", [MODEL_NAME, "zephyr-lora"], ) -async def test_batch_completions(server, client: openai.AsyncOpenAI, - model_name: str): +async def test_batch_completions(client: openai.AsyncOpenAI, model_name: str): # test simple list batch = await client.completions.create( model=model_name, @@ -741,7 +705,7 @@ async def test_batch_completions(server, client: openai.AsyncOpenAI, @pytest.mark.asyncio -async def test_logits_bias(server, client: openai.AsyncOpenAI): +async def test_logits_bias(client: openai.AsyncOpenAI): prompt = "Hello, my name is" max_tokens = 5 tokenizer = get_tokenizer(tokenizer_name=MODEL_NAME) @@ -790,7 +754,7 @@ async def test_logits_bias(server, client: openai.AsyncOpenAI): @pytest.mark.asyncio @pytest.mark.parametrize("guided_decoding_backend", ["outlines", "lm-format-enforcer"]) -async def test_guided_json_completion(server, client: openai.AsyncOpenAI, +async def test_guided_json_completion(client: openai.AsyncOpenAI, guided_decoding_backend: str): completion = await client.completions.create( model=MODEL_NAME, @@ -812,7 +776,7 @@ async def test_guided_json_completion(server, client: openai.AsyncOpenAI, @pytest.mark.asyncio @pytest.mark.parametrize("guided_decoding_backend", ["outlines", "lm-format-enforcer"]) -async def test_guided_json_chat(server, client: openai.AsyncOpenAI, +async def test_guided_json_chat(client: openai.AsyncOpenAI, guided_decoding_backend: str): messages = [{ "role": "system", @@ -859,7 +823,7 @@ async def test_guided_json_chat(server, client: openai.AsyncOpenAI, @pytest.mark.asyncio @pytest.mark.parametrize("guided_decoding_backend", ["outlines", "lm-format-enforcer"]) -async def test_guided_regex_completion(server, client: openai.AsyncOpenAI, +async def test_guided_regex_completion(client: openai.AsyncOpenAI, guided_decoding_backend: str): completion = await client.completions.create( model=MODEL_NAME, @@ -879,7 +843,7 @@ async def test_guided_regex_completion(server, client: openai.AsyncOpenAI, @pytest.mark.asyncio @pytest.mark.parametrize("guided_decoding_backend", ["outlines", "lm-format-enforcer"]) -async def test_guided_regex_chat(server, client: openai.AsyncOpenAI, +async def test_guided_regex_chat(client: openai.AsyncOpenAI, guided_decoding_backend: str): messages = [{ "role": "system", @@ -917,7 +881,7 @@ async def test_guided_regex_chat(server, client: openai.AsyncOpenAI, @pytest.mark.asyncio @pytest.mark.parametrize("guided_decoding_backend", ["outlines", "lm-format-enforcer"]) -async def test_guided_choice_completion(server, client: openai.AsyncOpenAI, +async def test_guided_choice_completion(client: openai.AsyncOpenAI, guided_decoding_backend: str): completion = await client.completions.create( model=MODEL_NAME, @@ -937,7 +901,7 @@ async def test_guided_choice_completion(server, client: openai.AsyncOpenAI, @pytest.mark.asyncio @pytest.mark.parametrize("guided_decoding_backend", ["outlines", "lm-format-enforcer"]) -async def test_guided_choice_chat(server, client: openai.AsyncOpenAI, +async def test_guided_choice_chat(client: openai.AsyncOpenAI, guided_decoding_backend: str): messages = [{ "role": "system", @@ -976,7 +940,7 @@ async def test_guided_choice_chat(server, client: openai.AsyncOpenAI, @pytest.mark.asyncio @pytest.mark.parametrize("guided_decoding_backend", ["outlines", "lm-format-enforcer"]) -async def test_guided_decoding_type_error(server, client: openai.AsyncOpenAI, +async def test_guided_decoding_type_error(client: 
openai.AsyncOpenAI, guided_decoding_backend: str): with pytest.raises(openai.BadRequestError): _ = await client.completions.create( @@ -1012,7 +976,7 @@ async def test_guided_decoding_type_error(server, client: openai.AsyncOpenAI, @pytest.mark.asyncio @pytest.mark.parametrize("guided_decoding_backend", ["outlines", "lm-format-enforcer"]) -async def test_guided_choice_chat_logprobs(server, client: openai.AsyncOpenAI, +async def test_guided_choice_chat_logprobs(client: openai.AsyncOpenAI, guided_decoding_backend: str): messages = [{ "role": "system", @@ -1044,7 +1008,7 @@ async def test_guided_choice_chat_logprobs(server, client: openai.AsyncOpenAI, @pytest.mark.asyncio @pytest.mark.parametrize("guided_decoding_backend", ["outlines", "lm-format-enforcer"]) -async def test_named_tool_use(server, client: openai.AsyncOpenAI, +async def test_named_tool_use(client: openai.AsyncOpenAI, guided_decoding_backend: str): messages = [{ "role": "system", @@ -1135,7 +1099,7 @@ async def test_named_tool_use(server, client: openai.AsyncOpenAI, @pytest.mark.asyncio @pytest.mark.parametrize("guided_decoding_backend", ["outlines"]) async def test_required_tool_use_not_yet_supported( - server, client: openai.AsyncOpenAI, guided_decoding_backend: str): + client: openai.AsyncOpenAI, guided_decoding_backend: str): messages = [{ "role": "system", "content": "you are a helpful assistant" @@ -1181,7 +1145,7 @@ async def test_required_tool_use_not_yet_supported( @pytest.mark.asyncio @pytest.mark.parametrize("guided_decoding_backend", ["outlines"]) async def test_inconsistent_tool_choice_and_tools( - server, client: openai.AsyncOpenAI, guided_decoding_backend: str): + client: openai.AsyncOpenAI, guided_decoding_backend: str): messages = [{ "role": "system", "content": "you are a helpful assistant" @@ -1227,7 +1191,7 @@ async def test_inconsistent_tool_choice_and_tools( @pytest.mark.asyncio -async def test_response_format_json_object(server, client: openai.AsyncOpenAI): +async def test_response_format_json_object(client: openai.AsyncOpenAI): for _ in range(2): resp = await client.chat.completions.create( model=MODEL_NAME, @@ -1247,7 +1211,7 @@ async def test_response_format_json_object(server, client: openai.AsyncOpenAI): @pytest.mark.asyncio -async def test_extra_fields(server, client: openai.AsyncOpenAI): +async def test_extra_fields(client: openai.AsyncOpenAI): with pytest.raises(BadRequestError) as exc_info: await client.chat.completions.create( model=MODEL_NAME, @@ -1263,7 +1227,7 @@ async def test_extra_fields(server, client: openai.AsyncOpenAI): @pytest.mark.asyncio -async def test_complex_message_content(server, client: openai.AsyncOpenAI): +async def test_complex_message_content(client: openai.AsyncOpenAI): resp = await client.chat.completions.create( model=MODEL_NAME, messages=[{ @@ -1283,7 +1247,7 @@ async def test_complex_message_content(server, client: openai.AsyncOpenAI): @pytest.mark.asyncio -async def test_custom_role(server, client: openai.AsyncOpenAI): +async def test_custom_role(client: openai.AsyncOpenAI): # Not sure how the model handles custom roles so we just check that # both string and complex message content are handled in the same way @@ -1314,7 +1278,7 @@ async def test_custom_role(server, client: openai.AsyncOpenAI): @pytest.mark.asyncio -async def test_guided_grammar(server, client: openai.AsyncOpenAI): +async def test_guided_grammar(client: openai.AsyncOpenAI): simple_sql_grammar = """ start: select_statement @@ -1355,7 +1319,7 @@ async def test_guided_grammar(server, client: 
openai.AsyncOpenAI): [MODEL_NAME, "zephyr-lora", "zephyr-lora2"], ) @pytest.mark.parametrize("logprobs_arg", [1, 0]) -async def test_echo_logprob_completion(server, client: openai.AsyncOpenAI, +async def test_echo_logprob_completion(client: openai.AsyncOpenAI, model_name: str, logprobs_arg: int): tokenizer = get_tokenizer(tokenizer_name=MODEL_NAME) # test using text and token IDs @@ -1384,7 +1348,7 @@ async def test_echo_logprob_completion(server, client: openai.AsyncOpenAI, @pytest.mark.asyncio -async def test_long_seed(server, client: openai.AsyncOpenAI): +async def test_long_seed(client: openai.AsyncOpenAI): for seed in [ torch.iinfo(torch.long).min - 1, torch.iinfo(torch.long).max + 1 @@ -1403,81 +1367,5 @@ async def test_long_seed(server, client: openai.AsyncOpenAI): or "less_than_equal" in exc_info.value.message) -@pytest.mark.asyncio -@pytest.mark.parametrize( - "model_name", - [EMBEDDING_MODEL_NAME], -) -async def test_single_embedding(embedding_server, client: openai.AsyncOpenAI, - model_name: str): - input_texts = [ - "The chef prepared a delicious meal.", - ] - - # test single embedding - embeddings = await client.embeddings.create( - model=model_name, - input=input_texts, - encoding_format="float", - ) - assert embeddings.id is not None - assert len(embeddings.data) == 1 - assert len(embeddings.data[0].embedding) == 4096 - assert embeddings.usage.completion_tokens == 0 - assert embeddings.usage.prompt_tokens == 9 - assert embeddings.usage.total_tokens == 9 - - # test using token IDs - input_tokens = [1, 1, 1, 1, 1] - embeddings = await client.embeddings.create( - model=model_name, - input=input_tokens, - encoding_format="float", - ) - assert embeddings.id is not None - assert len(embeddings.data) == 1 - assert len(embeddings.data[0].embedding) == 4096 - assert embeddings.usage.completion_tokens == 0 - assert embeddings.usage.prompt_tokens == 5 - assert embeddings.usage.total_tokens == 5 - - -@pytest.mark.asyncio -@pytest.mark.parametrize( - "model_name", - [EMBEDDING_MODEL_NAME], -) -async def test_batch_embedding(embedding_server, client: openai.AsyncOpenAI, - model_name: str): - # test List[str] - input_texts = [ - "The cat sat on the mat.", "A feline was resting on a rug.", - "Stars twinkle brightly in the night sky." 
- ] - embeddings = await client.embeddings.create( - model=model_name, - input=input_texts, - encoding_format="float", - ) - assert embeddings.id is not None - assert len(embeddings.data) == 3 - assert len(embeddings.data[0].embedding) == 4096 - - # test List[List[int]] - input_tokens = [[4, 5, 7, 9, 20], [15, 29, 499], [24, 24, 24, 24, 24], - [25, 32, 64, 77]] - embeddings = await client.embeddings.create( - model=model_name, - input=input_tokens, - encoding_format="float", - ) - assert embeddings.id is not None - assert len(embeddings.data) == 4 - assert len(embeddings.data[0].embedding) == 4096 - assert embeddings.usage.completion_tokens == 0 - assert embeddings.usage.prompt_tokens == 17 - assert embeddings.usage.total_tokens == 17 - - if __name__ == "__main__": pytest.main([__file__]) diff --git a/tests/entrypoints/test_openai_vision.py b/tests/entrypoints/test_openai_vision.py index cc03b04e0b0e0..616c411714d89 100644 --- a/tests/entrypoints/test_openai_vision.py +++ b/tests/entrypoints/test_openai_vision.py @@ -6,9 +6,14 @@ import pytest_asyncio import ray +from tests.nm_utils.utils_skip import should_skip_test_group from vllm.multimodal.utils import ImageFetchAiohttp, encode_image_base64 -from ..utils import ServerRunner +from ..utils import VLLM_PATH, RemoteOpenAIServer + +if should_skip_test_group(group_name="TEST_ENTRYPOINTS"): + pytest.skip("TEST_ENTRYPOINTS=DISABLE, skipping entrypoints group", + allow_module_level=True) MODEL_NAME = "llava-hf/llava-1.5-7b-hf" LLAVA_CHAT_TEMPLATE = (Path(__file__).parent.parent.parent / @@ -25,10 +30,16 @@ pytestmark = pytest.mark.openai +@pytest.fixture(scope="module") +def ray_ctx(): + ray.init(runtime_env={"working_dir": VLLM_PATH}) + yield + ray.shutdown() + + @pytest.fixture(scope="module") def server(): - ray.init() - server_runner = ServerRunner.remote([ + return RemoteOpenAIServer([ "--model", MODEL_NAME, "--dtype", @@ -47,18 +58,11 @@ def server(): "--chat-template", str(LLAVA_CHAT_TEMPLATE), ]) - ray.get(server_runner.ready.remote()) - yield server_runner - ray.shutdown() -@pytest.fixture(scope="session") -def client(): - client = openai.AsyncOpenAI( - base_url="http://localhost:8000/v1", - api_key="token-abc123", - ) - yield client +@pytest.fixture(scope="module") +def client(server): + return server.get_async_client() @pytest_asyncio.fixture(scope="session") @@ -73,7 +77,7 @@ async def base64_encoded_image() -> Dict[str, str]: @pytest.mark.asyncio @pytest.mark.parametrize("model_name", [MODEL_NAME]) @pytest.mark.parametrize("image_url", TEST_IMAGE_URLS) -async def test_single_chat_session_image(server, client: openai.AsyncOpenAI, +async def test_single_chat_session_image(client: openai.AsyncOpenAI, model_name: str, image_url: str): messages = [{ "role": @@ -126,7 +130,7 @@ async def test_single_chat_session_image(server, client: openai.AsyncOpenAI, @pytest.mark.parametrize("model_name", [MODEL_NAME]) @pytest.mark.parametrize("image_url", TEST_IMAGE_URLS) async def test_single_chat_session_image_base64encoded( - server, client: openai.AsyncOpenAI, model_name: str, image_url: str, + client: openai.AsyncOpenAI, model_name: str, image_url: str, base64_encoded_image: Dict[str, str]): messages = [{ @@ -180,7 +184,7 @@ async def test_single_chat_session_image_base64encoded( @pytest.mark.asyncio @pytest.mark.parametrize("model_name", [MODEL_NAME]) @pytest.mark.parametrize("image_url", TEST_IMAGE_URLS) -async def test_chat_streaming_image(server, client: openai.AsyncOpenAI, +async def test_chat_streaming_image(client: openai.AsyncOpenAI, 
model_name: str, image_url: str): messages = [{ "role": @@ -237,8 +241,8 @@ async def test_chat_streaming_image(server, client: openai.AsyncOpenAI, @pytest.mark.asyncio @pytest.mark.parametrize("model_name", [MODEL_NAME]) @pytest.mark.parametrize("image_url", TEST_IMAGE_URLS) -async def test_multi_image_input(server, client: openai.AsyncOpenAI, - model_name: str, image_url: str): +async def test_multi_image_input(client: openai.AsyncOpenAI, model_name: str, + image_url: str): messages = [{ "role": diff --git a/tests/kernels/test_cutlass.py b/tests/kernels/test_cutlass.py index a9aeeb3a78bf5..e7368fb87b6ae 100644 --- a/tests/kernels/test_cutlass.py +++ b/tests/kernels/test_cutlass.py @@ -52,7 +52,7 @@ def cutlass_fp8_gemm_helper(m: int, scale_b = (torch.randn( (1, n_b_scales), device=device, dtype=torch.float32) / 10) - out = ops.cutlass_scaled_mm_dq(a, b, scale_a, scale_b, out_dtype) + out = ops.cutlass_scaled_mm(a, b, scale_a, scale_b, out_dtype) baseline = torch.mm(scale_a * a.to(dtype=torch.float32), scale_b * b.to(dtype=torch.float32)).to(out_dtype) @@ -79,7 +79,7 @@ def cutlass_int8_gemm_helper(m: int, scale_b = (torch.randn( (1, n_b_scales), device=device, dtype=torch.float32) / 10) - out = ops.cutlass_scaled_mm_dq(a, b, scale_a, scale_b, out_dtype) + out = ops.cutlass_scaled_mm(a, b, scale_a, scale_b, out_dtype) baseline = torch.mm(scale_a * a.to(dtype=torch.float32), scale_b * b.to(dtype=torch.float32)).to(dtype=out_dtype) @@ -205,11 +205,11 @@ def test_cutlass_subset(): scale_a = torch.randn((1, 1), device="cuda", dtype=torch.float32) / 10 scale_b = torch.randn((1, 1), device="cuda", dtype=torch.float32) / 10 - out = ops.cutlass_scaled_mm_dq(a, - b, - scale_a, - scale_b, - out_dtype=torch.bfloat16) + out = ops.cutlass_scaled_mm(a, + b, + scale_a, + scale_b, + out_dtype=torch.bfloat16) baseline = torch.mm(scale_a * a.to(dtype=torch.float32), scale_b * b.to(dtype=torch.float32)).to(dtype=torch.bfloat16) @@ -228,8 +228,8 @@ def __init__(self, b, scale_a, scale_b, out_dtype): self.out_dtype = out_dtype def forward(self, a): - return ops.cutlass_scaled_mm_dq(a, self.b, self.scale_a, self.scale_b, - self.out_dtype) + return ops.cutlass_scaled_mm(a, self.b, self.scale_a, self.scale_b, + self.out_dtype) @pytest.mark.parametrize("per_act_token", [True, False]) diff --git a/tests/lora/conftest.py b/tests/lora/conftest.py index 400333066b9fa..522c635b82d9c 100644 --- a/tests/lora/conftest.py +++ b/tests/lora/conftest.py @@ -12,7 +12,10 @@ import vllm from vllm.config import LoRAConfig -from vllm.distributed import destroy_model_parallel, initialize_model_parallel +from vllm.distributed import (destroy_distributed_environment, + destroy_model_parallel, + init_distributed_environment, + initialize_model_parallel) from vllm.model_executor.layers.linear import (ColumnParallelLinear, MergedColumnParallelLinear, RowParallelLinear) @@ -35,6 +38,7 @@ def cleanup(): destroy_model_parallel() + destroy_distributed_environment() with contextlib.suppress(AssertionError): torch.distributed.destroy_process_group() gc.collect() @@ -64,15 +68,14 @@ def cleanup_fixture(should_do_global_cleanup_after_test: bool): @pytest.fixture def dist_init(): - if not torch.distributed.is_initialized(): - temp_file = tempfile.mkstemp()[1] - torch.distributed.init_process_group( - backend="nccl", - world_size=1, - rank=0, - init_method=f"file://{temp_file}", - ) - torch.distributed.all_reduce(torch.zeros(1).cuda()) + temp_file = tempfile.mkstemp()[1] + init_distributed_environment( + world_size=1, + rank=0, + 
distributed_init_method=f"file://{temp_file}", + local_rank=0, + backend="nccl", + ) initialize_model_parallel(1, 1) yield cleanup() diff --git a/tests/models/test_aqlm.py b/tests/models/test_aqlm.py index e24f2632a28d0..715215cf97f8c 100644 --- a/tests/models/test_aqlm.py +++ b/tests/models/test_aqlm.py @@ -4,23 +4,14 @@ """ import pytest -import torch from tests.nm_utils.utils_skip import should_skip_test_group -from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS +from tests.quantization.utils import is_quant_method_supported if should_skip_test_group(group_name="TEST_MODELS"): pytest.skip("TEST_MODELS=DISABLE, skipping model test group", allow_module_level=True) -aqlm_not_supported = True - -if torch.cuda.is_available(): - capability = torch.cuda.get_device_capability() - capability = capability[0] * 10 + capability[1] - aqlm_not_supported = (capability < - QUANTIZATION_METHODS["aqlm"].get_min_capability()) - # In this test we hardcode prompts and generations for the model so we don't # need to require the AQLM package as a dependency example_prompts = [ @@ -72,7 +63,7 @@ ] -@pytest.mark.skipif(aqlm_not_supported, +@pytest.mark.skipif(not is_quant_method_supported("aqlm"), reason="AQLM is not supported on this GPU type.") @pytest.mark.parametrize("model", ["ISTA-DASLab/Llama-2-7b-AQLM-2Bit-1x16-hf"]) @pytest.mark.parametrize("dtype", ["half"]) diff --git a/tests/models/test_fp8.py b/tests/models/test_fp8.py index c9daed58db6ae..f2fe000a41a5a 100644 --- a/tests/models/test_fp8.py +++ b/tests/models/test_fp8.py @@ -5,12 +5,11 @@ import os import pytest -import torch from transformers import AutoTokenizer from tests.nm_utils.utils_skip import should_skip_test_group +from tests.quantization.utils import is_quant_method_supported from vllm import LLM, SamplingParams -from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS if should_skip_test_group(group_name="TEST_MODELS"): pytest.skip("TEST_MODELS=DISABLE, skipping model test group", @@ -72,16 +71,16 @@ }, } -fp8_not_supported = True -if torch.cuda.is_available(): - capability = torch.cuda.get_device_capability() - capability = capability[0] * 10 + capability[1] - fp8_not_supported = (capability < - QUANTIZATION_METHODS["fp8"].get_min_capability()) - - -@pytest.mark.skipif(fp8_not_supported, +# This test compares against golden strings for exact match since +# there is no baseline implementation to compare against +# and is unstable w.r.t specifics of the fp8 implementation or +# the hardware being run on. 
+# Disabled to prevent it from breaking the build +@pytest.mark.skip( + reason= + "Prevent unstable test based on golden strings from breaking the build.") +@pytest.mark.skipif(not is_quant_method_supported("fp8"), reason="fp8 is not supported on this GPU type.") @pytest.mark.parametrize("model_name", MODELS) @pytest.mark.parametrize("kv_cache_dtype", ["auto", "fp8"]) diff --git a/tests/models/test_gptq_marlin.py b/tests/models/test_gptq_marlin.py index 978704cdc909c..eaf002a4ee79c 100644 --- a/tests/models/test_gptq_marlin.py +++ b/tests/models/test_gptq_marlin.py @@ -11,11 +11,10 @@ import os import pytest -import torch from tests.models.utils import check_logprobs_close from tests.nm_utils.utils_skip import should_skip_test_group -from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS +from tests.quantization.utils import is_quant_method_supported from vllm.model_executor.layers.rotary_embedding import _ROPE_DICT if should_skip_test_group(group_name="TEST_MODELS"): @@ -26,14 +25,6 @@ MAX_MODEL_LEN = 1024 -gptq_marlin_not_supported = True - -if torch.cuda.is_available(): - capability = torch.cuda.get_device_capability() - capability = capability[0] * 10 + capability[1] - gptq_marlin_not_supported = ( - capability < QUANTIZATION_METHODS["gptq_marlin"].get_min_capability()) - MODELS = [ # act_order==False, group_size=channelwise ("robertgshaw2/zephyr-7b-beta-channelwise-gptq", "main"), @@ -57,7 +48,7 @@ @pytest.mark.flaky(reruns=3) -@pytest.mark.skipif(gptq_marlin_not_supported, +@pytest.mark.skipif(not is_quant_method_supported("gptq_marlin"), reason="gptq_marlin is not supported on this GPU type.") @pytest.mark.parametrize("model", MODELS) @pytest.mark.parametrize("dtype", ["half", "bfloat16"]) diff --git a/tests/models/test_gptq_marlin_24.py b/tests/models/test_gptq_marlin_24.py index b4159ff0a4968..74377d47a4a24 100644 --- a/tests/models/test_gptq_marlin_24.py +++ b/tests/models/test_gptq_marlin_24.py @@ -9,24 +9,15 @@ from dataclasses import dataclass import pytest -import torch from tests.models.utils import check_logprobs_close from tests.nm_utils.utils_skip import should_skip_test_group -from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS +from tests.quantization.utils import is_quant_method_supported if should_skip_test_group(group_name="TEST_MODELS"): pytest.skip("TEST_MODELS=DISABLE, skipping model test group", allow_module_level=True) -marlin_not_supported = True - -if torch.cuda.is_available(): - capability = torch.cuda.get_device_capability() - capability = capability[0] * 10 + capability[1] - marlin_not_supported = ( - capability < QUANTIZATION_METHODS["marlin"].get_min_capability()) - @dataclass class ModelPair: @@ -52,7 +43,7 @@ class ModelPair: @pytest.mark.flaky(reruns=2) -@pytest.mark.skipif(marlin_not_supported, +@pytest.mark.skipif(not is_quant_method_supported("gptq_marlin_24"), reason="Marlin24 is not supported on this GPU type.") @pytest.mark.parametrize("model_pair", model_pairs) @pytest.mark.parametrize("dtype", ["half"]) diff --git a/tests/models/test_llava_next.py b/tests/models/test_llava_next.py index aa6ee268ae588..df378b0a722e2 100644 --- a/tests/models/test_llava_next.py +++ b/tests/models/test_llava_next.py @@ -3,12 +3,17 @@ import pytest from transformers import AutoTokenizer +from tests.nm_utils.utils_skip import should_skip_test_group from vllm.config import VisionLanguageConfig from ..conftest import IMAGE_FILES pytestmark = pytest.mark.llava +if should_skip_test_group(group_name="TEST_MODELS"): + 
pytest.skip("TEST_MODELS=DISABLE, skipping model test group", + allow_module_level=True) + _PREFACE = ( "A chat between a curious human and an artificial intelligence assistant. " "The assistant gives helpful, detailed, and polite answers to the human's " @@ -72,6 +77,8 @@ def vllm_to_hf_output(vllm_output: Tuple[List[int], str], return hf_input_ids, hf_output_str +@pytest.mark.skip("Failing in NM Automation due to writing to file without " + "permissions.") @pytest.mark.xfail( reason="Inconsistent image processor being used due to lack " "of support for dynamic image token replacement") diff --git a/tests/models/test_marlin.py b/tests/models/test_marlin.py index debf018ca8a80..a46165bdfd8b6 100644 --- a/tests/models/test_marlin.py +++ b/tests/models/test_marlin.py @@ -18,24 +18,15 @@ from dataclasses import dataclass import pytest -import torch from tests.models.utils import check_logprobs_close from tests.nm_utils.utils_skip import should_skip_test_group -from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS +from tests.quantization.utils import is_quant_method_supported if should_skip_test_group(group_name="TEST_MODELS"): pytest.skip("TEST_MODELS=DISABLE, skipping model test group", allow_module_level=True) -marlin_not_supported = True - -if torch.cuda.is_available(): - capability = torch.cuda.get_device_capability() - capability = capability[0] * 10 + capability[1] - marlin_not_supported = ( - capability < QUANTIZATION_METHODS["marlin"].get_min_capability()) - @dataclass class ModelPair: @@ -54,7 +45,7 @@ class ModelPair: @pytest.mark.flaky(reruns=2) -@pytest.mark.skipif(marlin_not_supported, +@pytest.mark.skipif(not is_quant_method_supported("marlin"), reason="Marlin is not supported on this GPU type.") @pytest.mark.parametrize("model_pair", model_pairs) @pytest.mark.parametrize("dtype", ["half"]) diff --git a/tests/quantization/test_bitsandbytes.py b/tests/quantization/test_bitsandbytes.py index 31e938d15a1f6..953fd9ba939c8 100644 --- a/tests/quantization/test_bitsandbytes.py +++ b/tests/quantization/test_bitsandbytes.py @@ -5,16 +5,12 @@ import pytest import torch +from tests.quantization.utils import is_quant_method_supported from vllm import SamplingParams -from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS -capability = torch.cuda.get_device_capability() -capability = capability[0] * 10 + capability[1] - -@pytest.mark.skipif( - capability < QUANTIZATION_METHODS['bitsandbytes'].get_min_capability(), - reason='bitsandbytes is not supported on this GPU type.') +@pytest.mark.skipif(not is_quant_method_supported("bitsandbytes"), + reason='bitsandbytes is not supported on this GPU type.') def test_load_bnb_model(vllm_runner) -> None: with vllm_runner('huggyllama/llama-7b', quantization='bitsandbytes', diff --git a/tests/quantization/test_compressed_tensors.py b/tests/quantization/test_compressed_tensors.py index 510175146910d..936f2f0f7830e 100644 --- a/tests/quantization/test_compressed_tensors.py +++ b/tests/quantization/test_compressed_tensors.py @@ -9,8 +9,8 @@ from tests.nm_utils.utils_skip import should_skip_test_group from vllm import SamplingParams from vllm.model_executor.layers.quantization.compressed_tensors.compressed_tensors import ( # noqa: E501 - CompressedTensorsLinearMethod, CompressedTensorsW8A8DynamicToken, - CompressedTensorsW8A8StaticTensor) + CompressedTensorsLinearMethod, CompressedTensorsW4A16, + CompressedTensorsW8A8DynamicToken, CompressedTensorsW8A8StaticTensor) if 
should_skip_test_group(group_name="TEST_QUANTIZATION"): pytest.skip("TEST_QUANTIZATION=DISABLE, skipping quantization test group", @@ -66,3 +66,25 @@ def test_compressed_tensors_w8a8_dynanmic_per_token(vllm_runner): assert isinstance(qkv_proj.quant_method, CompressedTensorsLinearMethod) assert isinstance(qkv_proj.scheme, CompressedTensorsW8A8DynamicToken) assert qkv_proj.weight.dtype is torch.int8 + + +@pytest.mark.parametrize("w4a16_args", [ + ("nm-testing/tinyllama-oneshot-w4a16-channel-v2", "channel", None), + ("nm-testing/tinyllama-oneshot-w4a16-group128-v2", "group", 128), +]) +def test_compressed_tensors_w4a16(vllm_runner, w4a16_args): + model, strategy, group = w4a16_args + with vllm_runner(model) as llm: + model = llm.model.llm_engine.model_executor.driver_worker.model_runner.model # noqa: E501 + layer = model.model.layers[0] + + qkv_proj = layer.self_attn.qkv_proj + assert isinstance(qkv_proj.quant_method, CompressedTensorsLinearMethod) + assert isinstance(qkv_proj.scheme, CompressedTensorsW4A16) + + assert qkv_proj.scheme.strategy == strategy + assert qkv_proj.scheme.group_size == group + + assert qkv_proj.weight_packed.dtype is torch.int32 + assert qkv_proj.weight_scale.dtype is torch.float16 + assert qkv_proj.weight_packed.pack_factor == 8 diff --git a/tests/quantization/test_fp8.py b/tests/quantization/test_fp8.py index 96bcd8a491d6c..583c431001f33 100644 --- a/tests/quantization/test_fp8.py +++ b/tests/quantization/test_fp8.py @@ -6,20 +6,17 @@ import torch from tests.nm_utils.utils_skip import should_skip_test_group -from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS +from tests.quantization.utils import is_quant_method_supported +from vllm._custom_ops import scaled_fp8_quant from vllm.model_executor.layers.quantization.fp8 import Fp8LinearMethod if should_skip_test_group(group_name="TEST_QUANTIZATION"): pytest.skip("TEST_QUANTIZATION=DISABLE, skipping quantization test group", allow_module_level=True) -capability = torch.cuda.get_device_capability() -capability = capability[0] * 10 + capability[1] - -@pytest.mark.skipif( - capability < QUANTIZATION_METHODS["fp8"].get_min_capability(), - reason="FP8 is not supported on this GPU type.") +@pytest.mark.skipif(not is_quant_method_supported("fp8"), + reason="FP8 is not supported on this GPU type.") def test_load_fp16_model(vllm_runner) -> None: with vllm_runner("facebook/opt-125m", quantization="fp8") as llm: @@ -27,3 +24,48 @@ def test_load_fp16_model(vllm_runner) -> None: fc1 = model.model.decoder.layers[0].fc1 assert isinstance(fc1.quant_method, Fp8LinearMethod) assert fc1.weight.dtype == torch.float8_e4m3fn + + +@pytest.mark.skipif(not is_quant_method_supported("fp8"), + reason="FP8 is not supported on this GPU type.") +@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16]) +def test_scaled_fp8_quant(dtype) -> None: + + def quantize_ref(tensor, inv_scale): + # The reference implementation that fully aligns to + # the kernel being tested. + finfo = torch.finfo(torch.float8_e4m3fn) + scale = inv_scale.reciprocal() + qweight = (tensor.to(torch.float32) * scale).clamp(min=finfo.min, + max=finfo.max) + qweight = qweight.to(torch.float8_e4m3fn) + return qweight + + def per_tensor_dequantize(tensor, inv_scale, dtype): + fake_qweight = tensor.to(dtype) + dq_weight = fake_qweight * inv_scale + return dq_weight + + # Note that we use a shape % 4 != 0 to cover edge cases, + # because scaled_fp8_quant is vectorized by 4. 
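For context on what `test_scaled_fp8_quant` exercises: the dynamic path (scale computed from the input) is assumed to behave like the following pure-PyTorch reference, where the returned scale is the dequantization multiplier. This is a sketch of the assumed semantics, not the actual CUDA kernel:

```python
import torch


def dynamic_scaled_fp8_quant_ref(x: torch.Tensor):
    """Per-tensor dynamic FP8 quantization: scale = amax / fp8_max."""
    finfo = torch.finfo(torch.float8_e4m3fn)
    # `scale` plays the role of the `inv_scale` returned by
    # scaled_fp8_quant in the test below (assumption).
    scale = (x.abs().max().to(torch.float32) / finfo.max).clamp(min=1e-12)
    qweight = (x.to(torch.float32) / scale).clamp(min=finfo.min,
                                                  max=finfo.max)
    return qweight.to(torch.float8_e4m3fn), scale
```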
+ x = (torch.randn(size=(11, 11), device="cuda") * 13).to(dtype) + + # Dynamic quantization + ref_y, inv_scale = scaled_fp8_quant(x, None) + ref_y = per_tensor_dequantize(ref_y, inv_scale, dtype) + + # Reference dynamic quantization + y = quantize_ref(x, inv_scale) + assert torch.allclose(ref_y, per_tensor_dequantize(y, inv_scale, dtype)) + + # Static quantization + y, _ = scaled_fp8_quant(x, inv_scale) + assert torch.allclose(ref_y, per_tensor_dequantize(y, inv_scale, dtype)) + + # Padding + y, _ = scaled_fp8_quant(x, inv_scale, batch_dim_padding=17) + assert y.shape[0] == 17 + assert torch.allclose( + ref_y, + per_tensor_dequantize(torch.narrow(y, 0, 0, x.shape[0]), inv_scale, + dtype)) diff --git a/tests/quantization/utils.py b/tests/quantization/utils.py new file mode 100644 index 0000000000000..0c92d565d0ddd --- /dev/null +++ b/tests/quantization/utils.py @@ -0,0 +1,14 @@ +import torch + +from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS + + +def is_quant_method_supported(quant_method: str) -> bool: + # Currently, all quantization methods require Nvidia or AMD GPUs + if not torch.cuda.is_available(): + return False + + capability = torch.cuda.get_device_capability() + capability = capability[0] * 10 + capability[1] + return (capability >= + QUANTIZATION_METHODS[quant_method].get_min_capability()) diff --git a/tests/tensorizer_loader/test_tensorizer.py b/tests/tensorizer_loader/test_tensorizer.py index 39dc67a3f336f..e665fa4e43851 100644 --- a/tests/tensorizer_loader/test_tensorizer.py +++ b/tests/tensorizer_loader/test_tensorizer.py @@ -1,22 +1,29 @@ import json import os +import pathlib import subprocess from unittest.mock import MagicMock, patch import openai import pytest import ray +import torch +from tensorizer import EncryptionParams from tests.nm_utils.utils_skip import should_skip_test_group -from tests.utils import ServerRunner from vllm import SamplingParams +from vllm.engine.arg_utils import EngineArgs # yapf: disable from vllm.model_executor.model_loader.tensorizer import (TensorizerConfig, TensorSerializer, is_vllm_tensorized, load_with_tensorizer, open_stream, - serialize_vllm_model) + serialize_vllm_model, + tensorize_vllm_model) + +from ..conftest import VllmRunner, cleanup +from ..utils import RemoteOpenAIServer # yapf conflicts with isort for this docstring @@ -46,6 +53,20 @@ def is_curl_installed(): except (subprocess.CalledProcessError, FileNotFoundError): return False +def get_torch_model(vllm_runner: VllmRunner): + return vllm_runner \ + .model \ + .llm_engine \ + .model_executor \ + .driver_worker \ + .model_runner \ + .model + +def write_keyfile(keyfile_path: str): + encryption_params = EncryptionParams.random() + pathlib.Path(keyfile_path).parent.mkdir(parents=True, exist_ok=True) + with open(keyfile_path, 'wb') as f: + f.write(encryption_params.key) @pytest.fixture(autouse=True) def tensorizer_config(): @@ -98,12 +119,17 @@ def test_deserialized_encrypted_vllm_model_has_same_outputs( with vllm_runner(model_ref) as vllm_model: model_path = tmp_path / (model_ref + ".tensors") key_path = tmp_path / (model_ref + ".key") + write_keyfile(key_path) + outputs = vllm_model.generate(prompts, sampling_params) - config_for_serializing = TensorizerConfig(tensorizer_uri=model_path) - serialize_vllm_model(vllm_model.model.llm_engine, - config_for_serializing, - encryption_key_path=key_path) + config_for_serializing = TensorizerConfig( + tensorizer_uri=model_path, + encryption_keyfile=key_path + ) + serialize_vllm_model(get_torch_model(vllm_model), +
config_for_serializing) + config_for_deserializing = TensorizerConfig(tensorizer_uri=model_path, encryption_keyfile=key_path) @@ -155,7 +181,7 @@ def test_vllm_model_can_load_with_lora(vllm_runner, tmp_path): with vllm_runner(model_ref, ) as vllm_model: model_path = tmp_path / (model_ref + ".tensors") - serialize_vllm_model(vllm_model.model.llm_engine, + serialize_vllm_model(get_torch_model(vllm_model), TensorizerConfig(tensorizer_uri=model_path)) with vllm_runner( @@ -190,7 +216,7 @@ def test_openai_apiserver_with_tensorizer(vllm_runner, tmp_path): with vllm_runner(model_ref, ) as vllm_model: model_path = tmp_path / (model_ref + ".tensors") - serialize_vllm_model(vllm_model.model.llm_engine, + serialize_vllm_model(get_torch_model(vllm_model), TensorizerConfig(tensorizer_uri=model_path)) model_loader_extra_config = { @@ -201,18 +227,13 @@ def test_openai_apiserver_with_tensorizer(vllm_runner, tmp_path): openai_args = [ "--model", model_ref, "--dtype", "float16", "--load-format", "tensorizer", "--model-loader-extra-config", - json.dumps(model_loader_extra_config), "--port", "8000" + json.dumps(model_loader_extra_config), ] - server = ServerRunner.remote(openai_args) - - assert ray.get(server.ready.remote()) + server = RemoteOpenAIServer(openai_args) print("Server ready.") - client = openai.OpenAI( - base_url="http://localhost:8000/v1", - api_key="token-abc123", - ) + client = server.get_client() completion = client.completions.create(model=model_ref, prompt="Hello, my name is", max_tokens=5, @@ -234,9 +255,9 @@ def test_raise_value_error_on_invalid_load_format(vllm_runner): model_loader_extra_config=TensorizerConfig(tensorizer_uri="test")) -@pytest.mark.skip("Failing in Automation due to " - "'NameError: name 'ncclGetVersion' is not defined'") -def test_tensorizer_with_tp(vllm_runner): +@pytest.mark.skipif(torch.cuda.device_count() < 2, + reason="Requires 2 GPUs") +def test_tensorizer_with_tp_path_without_template(vllm_runner): with pytest.raises(ValueError): model_ref = "EleutherAI/pythia-1.4b" tensorized_path = f"s3://tensorized/{model_ref}/fp16/model.tensors" @@ -250,8 +271,62 @@ def test_tensorizer_with_tp(vllm_runner): s3_endpoint="object.ord1.coreweave.com", ), tensor_parallel_size=2, + disable_custom_all_reduce=True, ) +@pytest.mark.skipif(torch.cuda.device_count() < 2, + reason="Requires 2 GPUs") +def test_deserialized_encrypted_vllm_model_with_tp_has_same_outputs(vllm_runner, + tmp_path): + model_ref = "EleutherAI/pythia-1.4b" + # record outputs from un-sharded un-tensorized model + base_model = vllm_runner( + model_ref, + disable_custom_all_reduce=True, + enforce_eager=True, + ) + outputs = base_model.generate(prompts, sampling_params) + + base_model.model.llm_engine.model_executor.shutdown() + del base_model + cleanup() + ray.shutdown() + + # load model with two shards and serialize with encryption + model_path = str(tmp_path / (model_ref + "-%02d.tensors")) + key_path = tmp_path / (model_ref + ".key") + + tensorizer_config = TensorizerConfig( + tensorizer_uri=model_path, + encryption_keyfile=key_path, + ) + + tensorize_vllm_model( + engine_args=EngineArgs( + model=model_ref, + tensor_parallel_size=2, + disable_custom_all_reduce=True, + enforce_eager=True, + ), + tensorizer_config=tensorizer_config, + ) + assert os.path.isfile(model_path % 0), "Serialization subprocess failed" + assert os.path.isfile(model_path % 1), "Serialization subprocess failed" + cleanup() + ray.shutdown() + + loaded_vllm_model = vllm_runner( + model_ref, + tensor_parallel_size=2, + 
load_format="tensorizer", + disable_custom_all_reduce=True, + enforce_eager=True, + model_loader_extra_config=tensorizer_config) + + deserialized_outputs = loaded_vllm_model.generate(prompts, sampling_params) + + assert outputs == deserialized_outputs + def test_vllm_tensorized_model_has_same_outputs(vllm_runner, tmp_path): model_ref = "facebook/opt-125m" @@ -260,7 +335,7 @@ def test_vllm_tensorized_model_has_same_outputs(vllm_runner, tmp_path): with vllm_runner(model_ref) as vllm_model: outputs = vllm_model.generate(prompts, sampling_params) - serialize_vllm_model(vllm_model.model.llm_engine, config) + serialize_vllm_model(get_torch_model(vllm_model), config) assert is_vllm_tensorized(config) diff --git a/tests/test_sharded_state_loader.py b/tests/test_sharded_state_loader.py index 5bb3a5c5d65e0..9d588b8af9126 100644 --- a/tests/test_sharded_state_loader.py +++ b/tests/test_sharded_state_loader.py @@ -39,7 +39,7 @@ def test_filter_subtensors(): filtered_state_dict = ShardedStateLoader._filter_subtensors(state_dict) assert tuple(filtered_state_dict.keys()) == ("a", "b", "c") for key, tensor in filtered_state_dict.items(): - # NOTE: don't use `euqal` here, as the tensor might contain NaNs + # NOTE: don't use `equal` here, as the tensor might contain NaNs assert tensor is state_dict[key] diff --git a/tests/utils.py b/tests/utils.py index cc8b862769475..c84364d20fc63 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -4,57 +4,109 @@ import time import warnings from contextlib import contextmanager +from typing import List +import openai import ray import requests from vllm.distributed import (ensure_model_parallel_initialized, init_distributed_environment) +from vllm.entrypoints.openai.cli_args import make_arg_parser from vllm.utils import get_open_port # Path to root of repository so that utilities can be imported by ray workers VLLM_PATH = os.path.abspath(os.path.join(__file__, os.pardir, os.pardir)) -@ray.remote(num_gpus=1) -class ServerRunner: +class RemoteOpenAIServer: + DUMMY_API_KEY = "token-abc123" # vLLM's OpenAI server does not need API key MAX_SERVER_START_WAIT_S = 600 # wait for server to start for 60 seconds - def __init__(self, args): - env = os.environ.copy() - env["PYTHONUNBUFFERED"] = "1" - self.proc = subprocess.Popen( - [sys.executable, "-m", "vllm.entrypoints.openai.api_server"] + - args, - env=env, - stdout=sys.stdout, - stderr=sys.stderr, + @ray.remote(num_gpus=1) + class _RemoteRunner: + + def __init__(self, cli_args: List[str], *, wait_url: str, + wait_timeout: float) -> None: + env = os.environ.copy() + env["PYTHONUNBUFFERED"] = "1" + self.proc = subprocess.Popen( + [ + sys.executable, "-m", "vllm.entrypoints.openai.api_server", + *cli_args + ], + env=env, + stdout=sys.stdout, + stderr=sys.stderr, + ) + + self._wait_for_server(url=wait_url, timeout=wait_timeout) + + def ready(self): + return True + + def _wait_for_server(self, *, url: str, timeout: float): + # run health check + start = time.time() + while True: + try: + if requests.get(url).status_code == 200: + break + except Exception as err: + if self.proc.poll() is not None: + raise RuntimeError( + "Server exited unexpectedly.") from err + + time.sleep(0.5) + if time.time() - start > timeout: + raise RuntimeError( + "Server failed to start in time.") from err + + def __del__(self): + if hasattr(self, "proc"): + self.proc.terminate() + + def __init__(self, cli_args: List[str], *, auto_port: bool = True) -> None: + if auto_port: + if "-p" in cli_args or "--port" in cli_args: + raise ValueError("You have manually 
specified the port" + "when `auto_port=True`.") + + cli_args = cli_args + ["--port", str(get_open_port())] + + parser = make_arg_parser() + args = parser.parse_args(cli_args) + self.host = str(args.host or 'localhost') + self.port = int(args.port) + + self._runner = self._RemoteRunner.remote( + cli_args, + wait_url=self.url_for("health"), + wait_timeout=self.MAX_SERVER_START_WAIT_S) + + self._wait_until_ready() + + @property + def url_root(self) -> str: + return f"http://{self.host}:{self.port}" + + def url_for(self, *parts: str) -> str: + return self.url_root + "/" + "/".join(parts) + + def _wait_until_ready(self) -> None: + ray.get(self._runner.ready.remote()) + + def get_client(self): + return openai.OpenAI( + base_url=self.url_for("v1"), + api_key=self.DUMMY_API_KEY, + ) + + def get_async_client(self): + return openai.AsyncOpenAI( + base_url=self.url_for("v1"), + api_key=self.DUMMY_API_KEY, ) - self._wait_for_server() - - def ready(self): - return True - - def _wait_for_server(self): - # run health check - start = time.time() - while True: - try: - if requests.get( - "http://localhost:8000/health").status_code == 200: - break - except Exception as err: - if self.proc.poll() is not None: - raise RuntimeError("Server exited unexpectedly.") from err - - time.sleep(0.5) - if time.time() - start > self.MAX_SERVER_START_WAIT_S: - raise RuntimeError( - "Server failed to start in time.") from err - - def __del__(self): - if hasattr(self, "proc"): - self.proc.terminate() def init_test_distributed_environment( diff --git a/tests/worker/test_model_runner.py b/tests/worker/test_model_runner.py index e40de0dc027d8..b048d9f7e1d9d 100644 --- a/tests/worker/test_model_runner.py +++ b/tests/worker/test_model_runner.py @@ -2,7 +2,8 @@ import torch from tests.nm_utils.utils_skip import should_skip_test_group -from vllm.distributed.parallel_state import init_distributed_environment +from vllm.distributed.parallel_state import (ensure_model_parallel_initialized, + init_distributed_environment) from vllm.engine.arg_utils import EngineArgs from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.sequence import SamplingParams, SequenceData, SequenceGroupMetadata @@ -297,6 +298,7 @@ def distributed_init(): rank=0, distributed_init_method=f"tcp://127.0.0.1:{get_open_port()}", local_rank=0) + ensure_model_parallel_initialized(1, 1) @pytest.mark.parametrize("batch_size", list(range(2, 128))) diff --git a/vllm/__init__.py b/vllm/__init__.py index 84dc6399e83ae..c7baad34885ce 100644 --- a/vllm/__init__.py +++ b/vllm/__init__.py @@ -12,10 +12,16 @@ from vllm.pooling_params import PoolingParams from vllm.sampling_params import SamplingParams -# UPSTREAM SYNC: use the current downstream. 
-__version__ = "0.5.0" +from .version import __version__ + + +def githash(): + import torch + return torch.ops._C.githash() + __all__ = [ + "__version__", "githash", "LLM", "ModelRegistry", @@ -34,8 +40,3 @@ "initialize_ray_cluster", "PoolingParams", ] - - -def githash(): - import torch - return torch.ops._C.githash() diff --git a/vllm/_custom_ops.py b/vllm/_custom_ops.py index 440b0e8afa99a..2f84b8bde6b57 100644 --- a/vllm/_custom_ops.py +++ b/vllm/_custom_ops.py @@ -1,13 +1,16 @@ import contextlib +import functools from typing import List, Optional, Tuple, Type import torch +from vllm.logger import init_logger + +logger = init_logger(__name__) + try: import vllm._C except ImportError as e: - from vllm.logger import init_logger - logger = init_logger(__name__) logger.warning("Failed to import from vllm._C with %r", e) with contextlib.suppress(ImportError): @@ -23,6 +26,25 @@ def is_custom_op_supported(op_name: str) -> bool: return op is not None +def hint_on_error(fn): + + @functools.wraps(fn) + def wrapper(*args, **kwargs): + try: + return fn(*args, **kwargs) + except AttributeError as e: + msg = ( + "Error in calling custom op %s: %s\n" + "Possibly you have built or installed an obsolete version of vllm.\n" + "Please try a clean build and install of vllm," + "or remove old built files such as vllm/*cpython*.so and build/ ." + ) + logger.error(msg, fn.__name__, e) + raise e + + return wrapper + + # activation ops def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: torch.ops._C.silu_and_mul(out, x) @@ -190,9 +212,9 @@ def gptq_marlin_24_gemm(a: torch.Tensor, b_q_weight: torch.Tensor, # cutlass -def cutlass_scaled_mm_dq(a: torch.Tensor, b: torch.Tensor, - scale_a: torch.Tensor, scale_b: torch.Tensor, - out_dtype: Type[torch.dtype]) -> torch.Tensor: +def cutlass_scaled_mm(a: torch.Tensor, b: torch.Tensor, scale_a: torch.Tensor, + scale_b: torch.Tensor, + out_dtype: Type[torch.dtype]) -> torch.Tensor: assert (b.shape[0] % 16 == 0 and b.shape[1] % 16 == 0) assert (out_dtype is torch.bfloat16 or out_dtype is torch.float16) @@ -200,8 +222,7 @@ def cutlass_scaled_mm_dq(a: torch.Tensor, b: torch.Tensor, n = b.shape[1] out = torch.empty((m, n), dtype=out_dtype, device=a.device) - torch.ops._C.cutlass_scaled_mm_dq(out, a, b, scale_a, scale_b) - + torch.ops._C.cutlass_scaled_mm(out, a, b, scale_a, scale_b) return out @@ -459,3 +480,25 @@ def dispatch_bgmv_low_level( h_out, y_offset, ) + + +# temporary fix for https://github.com/vllm-project/vllm/issues/5456 +# TODO: remove this in v0.6.0 +names_and_values = globals() +names_and_values_to_update = {} +# prepare variables to avoid dict size change during iteration +k, v, arg = None, None, None +fn_type = type(lambda x: x) +for k, v in names_and_values.items(): + # find functions that are defined in this file and have torch.Tensor + # in their annotations. `arg == "torch.Tensor"` is used to handle + # the case when users use `import __annotations__` to turn type + # hints into strings. 
+ if isinstance(v, fn_type) \ + and v.__code__.co_filename == __file__ \ + and any(arg is torch.Tensor or arg == "torch.Tensor" + for arg in v.__annotations__.values()): + names_and_values_to_update[k] = hint_on_error(v) + +names_and_values.update(names_and_values_to_update) +del names_and_values_to_update, names_and_values, v, k, fn_type diff --git a/vllm/attention/backends/flash_attn.py b/vllm/attention/backends/flash_attn.py index 8c64c2bfdeb8f..300bab72877b8 100644 --- a/vllm/attention/backends/flash_attn.py +++ b/vllm/attention/backends/flash_attn.py @@ -317,7 +317,7 @@ def forward( # normal attention # When block_tables are not filled, it means q and k are the # prompt, and they have the same length. - flash_attn_varlen_func( + out = flash_attn_varlen_func( q=query, k=key, v=value, @@ -329,13 +329,14 @@ def forward( causal=True, window_size=self.sliding_window, alibi_slopes=self.alibi_slopes, - out=output[:num_prefill_tokens], ) + assert output[:num_prefill_tokens].shape == out.shape + output[:num_prefill_tokens] = out else: # prefix-enabled attention assert prefill_meta.seq_lens is not None max_seq_len = max(prefill_meta.seq_lens) - flash_attn_varlen_func( + output[:num_prefill_tokens] = flash_attn_varlen_func( q=query, k=key_cache, v=value_cache, @@ -347,12 +348,11 @@ def forward( causal=True, alibi_slopes=self.alibi_slopes, block_table=prefill_meta.block_tables, - out=output[:num_prefill_tokens], ) if decode_meta := attn_metadata.decode_metadata: # Decoding run. - flash_attn_with_kvcache( + output[num_prefill_tokens:] = flash_attn_with_kvcache( decode_query.unsqueeze(1), key_cache, value_cache, @@ -361,8 +361,7 @@ def forward( softmax_scale=self.scale, causal=True, alibi_slopes=self.alibi_slopes, - out=output[num_prefill_tokens:].unsqueeze(1), - ) + ).squeeze(1) # Reshape the output tensor. return output.view(num_tokens, hidden_size) diff --git a/vllm/attention/backends/pallas.py b/vllm/attention/backends/pallas.py new file mode 100644 index 0000000000000..75f2465264ad3 --- /dev/null +++ b/vllm/attention/backends/pallas.py @@ -0,0 +1,232 @@ +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Tuple, Type + +import torch +import torch_xla.experimental.custom_kernel # Required to register custom ops. +import torch_xla.experimental.dynamo_set_buffer_donor + +from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl, + AttentionMetadata) + + +class PallasAttentionBackend(AttentionBackend): + + @staticmethod + def get_impl_cls() -> Type["PallasAttentionBackendImpl"]: + return PallasAttentionBackendImpl + + @staticmethod + def make_metadata(*args, **kwargs) -> "PallasMetadata": + return PallasMetadata(*args, **kwargs) + + @staticmethod + def get_kv_cache_shape( + num_blocks: int, + block_size: int, + num_kv_heads: int, + head_size: int, + ) -> Tuple[int, ...]: + return (num_kv_heads, num_blocks, block_size, head_size) + + @staticmethod + def swap_blocks( + src_kv_cache: torch.Tensor, + dst_kv_cache: torch.Tensor, + src_to_dst: Dict[int, int], + ) -> None: + raise NotImplementedError("swap_blocks is not implemented.") + + @staticmethod + def copy_blocks( + kv_caches: List[torch.Tensor], + src_to_dists: Dict[int, List[int]], + ) -> None: + # TODO(woosuk): Implement this. + raise NotImplementedError("copy_blocks is not implemented.") + + +@dataclass +class PallasMetadata(AttentionMetadata): + + # Currently, input sequences can only contain all prefills + # or all decoding. 
+ block_tables: Optional[torch.Tensor] + context_lens: Optional[torch.Tensor] + + @property + def prefill_metadata(self) -> Optional["PallasMetadata"]: + if self.num_prefills == 0: + return None + + assert self.num_decode_tokens == 0 + assert self.block_tables is None + assert self.context_lens is None + return self + + @property + def decode_metadata(self) -> Optional["PallasMetadata"]: + if self.num_decode_tokens == 0: + return None + + assert self.num_prefills == 0 + assert self.num_prefill_tokens == 0 + assert self.block_tables is not None + assert self.context_lens is not None + return self + + +class PallasAttentionBackendImpl(AttentionImpl): + + def __init__( + self, + num_heads: int, + head_size: int, + scale: float, + num_kv_heads: int, + alibi_slopes: Optional[List[float]], + sliding_window: Optional[int], + kv_cache_dtype: str, + blocksparse_params: Optional[Dict[str, Any]] = None, + ) -> None: + self.num_heads = num_heads + self.head_size = head_size + self.scale = float(scale) + self.num_kv_heads = num_heads if num_kv_heads is None else num_kv_heads + + assert self.num_heads % self.num_kv_heads == 0 + self.num_queries_per_kv = self.num_heads // self.num_kv_heads + if head_size % 128 != 0: + raise NotImplementedError("Head size must be a multiple of 128.") + if alibi_slopes is not None: + raise NotImplementedError("Alibi slopes is not supported.") + if sliding_window is not None: + raise NotImplementedError("Sliding window is not supported.") + if kv_cache_dtype != "auto": + raise NotImplementedError("FP8 KV cache dtype is not supported.") + if blocksparse_params is not None: + raise NotImplementedError("Blocksparse is not supported.") + + if torch_xla.tpu.version() < 4: + raise NotImplementedError("TPU version must be 4 or higher.") + + self.megacore_mode = None + tpu_type = torch_xla.tpu.get_tp_groupu_env()["TYPE"].lower() + if not tpu_type.endswith("lite"): + if self.num_kv_heads % 2 == 0: + self.megacore_mode = "kv_head" + else: + # NOTE(woosuk): If the batch size is not a multiple of 2, the + # megacore mode will be None. + self.megacore_mode = "batch" + + def forward( + self, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + kv_cache: Tuple[Optional[torch.Tensor], Optional[torch.Tensor]], + attn_metadata: PallasMetadata, + kv_scale: float = 1.0, + ) -> torch.Tensor: + """Forward pass with Pallas attention. + + Args: + query: shape = [batch_size, seq_len, num_heads * head_size] + key: shape = [batch_size, seq_len, num_kv_heads * head_size] + value: shape = [batch_size, seq_len, num_kv_heads * head_size] + key_cache = [num_kv_heads, num_blocks, block_size, head_size] + value_cache = [num_kv_heads, num_blocks, block_size, head_size] + attn_metadata: Metadata for attention. + Returns: + shape = [batch_size, seq_len, num_heads * head_size] + """ + assert kv_scale == 1.0 + batch_size, seq_len, hidden_size = query.shape + query = query.view(batch_size, seq_len, self.num_heads, self.head_size) + key = key.view(batch_size, seq_len, self.num_kv_heads, self.head_size) + value = value.view(batch_size, seq_len, self.num_kv_heads, + self.head_size) + + if kv_cache[0] is not None: + slot_mapping = attn_metadata.slot_mapping + key_cache, value_cache = kv_cache + write_to_kv_cache(key, value, key_cache, value_cache, slot_mapping) + + query = query * self.scale + if attn_metadata.num_prefills > 0: + assert seq_len % 16 == 0, ( + "Pallas FlashAttention kernel requires seq_len to be a " + f"multiple of 16 but got {seq_len}") + + # Handle GQA/MQA. 
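The GQA/MQA handling that follows in the Pallas prefill path expands the KV heads with `repeat_interleave` so every query head has a matching key/value head. A standalone illustration of that shape transformation (the sizes are arbitrary examples):

```python
import torch

batch_size, seq_len, head_size = 1, 16, 128
num_heads, num_kv_heads = 8, 2
num_queries_per_kv = num_heads // num_kv_heads

key = torch.randn(batch_size, seq_len, num_kv_heads, head_size)
# duplicate each KV head for all of the query heads that share it
key = key.repeat_interleave(num_queries_per_kv, dim=-2)
assert key.shape == (batch_size, seq_len, num_heads, head_size)
```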
+ if self.num_kv_heads != self.num_heads: + key = key.repeat_interleave(self.num_queries_per_kv, dim=-2) + key = key.view(batch_size, seq_len, self.num_heads, + self.head_size) + value = value.repeat_interleave(self.num_queries_per_kv, + dim=-2) + value = value.view(batch_size, seq_len, self.num_heads, + self.head_size) + # FlashAttention requires [batch_size, num_heads, seq_len, d_model] + # while the input is [batch_size, seq_len, num_heads, d_model]. + # Permute the input to match the required format. + output = torch.ops.xla.flash_attention( + query.permute(0, 2, 1, 3), + key.permute(0, 2, 1, 3), + value.permute(0, 2, 1, 3), + True, + ) + output = output.permute(0, 2, 1, 3) + else: + # Decoding run. + assert kv_cache is not None + + pages_per_compute_block = 16 # TODO(woosuk): Tune this value. + if self.megacore_mode == "batch" and batch_size % 2 != 0: + megacore_mode = None + else: + megacore_mode = self.megacore_mode + + # NOTE(woosuk): A temporary workaround to avoid the error: + # "xla::paged_attention() Expected a value of type 'str' for + # argument 'megacore_mode' but instead found type 'NoneType'." + if megacore_mode is not None: + output = torch.ops.xla.paged_attention( + query.squeeze(dim=1), + key_cache, + value_cache, + attn_metadata.context_lens, + attn_metadata.block_tables, + pages_per_compute_block, + megacore_mode=megacore_mode, + ) + else: + output = torch.ops.xla.paged_attention( + query.squeeze(dim=1), + key_cache, + value_cache, + attn_metadata.context_lens, + attn_metadata.block_tables, + pages_per_compute_block, + ) + + # Reshape the output tensor. + return output.reshape(batch_size, seq_len, hidden_size) + + +def write_to_kv_cache( + key: torch.Tensor, + value: torch.Tensor, + key_cache: torch.Tensor, + value_cache: torch.Tensor, + slot_mapping: torch.Tensor, +) -> None: + torch.ops.xla.dynamo_set_buffer_donor_(key_cache, True) + torch.ops.xla.dynamo_set_buffer_donor_(value_cache, True) + + key = key.flatten(0, 2) + value = value.flatten(0, 2) + key_cache = key_cache.flatten(0, 2) + value_cache = value_cache.flatten(0, 2) + key_cache.index_copy_(0, slot_mapping, key) + value_cache.index_copy_(0, slot_mapping, value) diff --git a/vllm/attention/backends/torch_sdpa.py b/vllm/attention/backends/torch_sdpa.py index 9b50adec5244d..4b08cce99afb0 100644 --- a/vllm/attention/backends/torch_sdpa.py +++ b/vllm/attention/backends/torch_sdpa.py @@ -8,8 +8,16 @@ from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl, AttentionMetadata) -from vllm.attention.ops.paged_attn import (PagedAttention, - PagedAttentionMetadata) +from vllm.attention.ops.paged_attn import PagedAttentionMetadata +from vllm.utils import is_cpu + +if is_cpu(): + try: + from vllm.attention.ops.ipex_attn import PagedAttention + except ImportError: + from vllm.attention.ops.paged_attn import PagedAttention +else: + from vllm.attention.ops.paged_attn import PagedAttention class TorchSDPABackend(AttentionBackend): @@ -197,13 +205,14 @@ def forward( attn_metadata.attn_bias): end = start + seq_len sub_out = scaled_dot_product_attention( - query[:, start:end, :], - key[:, start:end, :], - value[:, start:end, :], + query[None, :, start:end, :], + key[None, :, start:end, :], + value[None, :, start:end, :], attn_mask=mask, dropout_p=0.0, is_causal=not self.need_mask, - scale=self.scale).movedim(query.dim() - 2, 0) + scale=self.scale).squeeze(0).movedim( + query.dim() - 2, 0) output[start:end, :, :] = sub_out start = end else: @@ -248,7 +257,7 @@ def _make_alibi_bias( num_heads = 
alibi_slopes.shape[0] bias = bias[None, :].repeat((num_heads, 1, 1)) - bias.mul_(alibi_slopes[:, None, None]) + bias.mul_(alibi_slopes[:, None, None]).unsqueeze_(0) inf_mask = torch.empty( (1, seq_len, seq_len), dtype=bias.dtype).fill_(-torch.inf).triu_(diagonal=1) diff --git a/vllm/attention/ops/ipex_attn.py b/vllm/attention/ops/ipex_attn.py new file mode 100644 index 0000000000000..5a5317b65004e --- /dev/null +++ b/vllm/attention/ops/ipex_attn.py @@ -0,0 +1,120 @@ +from typing import Dict, List, Optional, Tuple + +import intel_extension_for_pytorch.llm.modules as ipex_modules +import torch + +from vllm import _custom_ops as ops + + +class PagedAttention: + + @staticmethod + def get_supported_head_sizes() -> List[int]: + return [64, 80, 96, 112, 128, 256] + + @staticmethod + def get_kv_cache_shape( + num_blocks: int, + block_size: int, + num_kv_heads: int, + head_size: int, + *args, + ) -> Tuple[int, ...]: + return (2, num_blocks, block_size * num_kv_heads * head_size) + + @staticmethod + def split_kv_cache( + kv_cache: torch.Tensor, + num_kv_heads: int, + head_size: int, + *args, + ) -> Tuple[torch.Tensor, torch.Tensor]: + num_blocks = kv_cache.shape[1] + + key_cache = kv_cache[0] + key_cache = key_cache.view(num_blocks, num_kv_heads, -1, head_size) + value_cache = kv_cache[1] + value_cache = value_cache.view(num_blocks, num_kv_heads, -1, head_size) + return key_cache, value_cache + + @staticmethod + def write_to_paged_cache( + key: torch.Tensor, + value: torch.Tensor, + key_cache: torch.Tensor, + value_cache: torch.Tensor, + slot_mapping: torch.Tensor, + kv_cache_dtype: str, + kv_scale: float, + *args, + ) -> None: + ipex_modules.PagedAttention.reshape_and_cache( + key, value, key_cache, value_cache, + slot_mapping.flatten().int()) + + @staticmethod + def forward_decode( + query: torch.Tensor, + key_cache: torch.Tensor, + value_cache: torch.Tensor, + block_tables: torch.Tensor, + context_lens: torch.Tensor, + max_context_len: int, + kv_cache_dtype: str, + num_kv_heads: int, + scale: float, + alibi_slopes: Optional[torch.Tensor], + kv_scale: float, + *args, + ) -> torch.Tensor: + output = torch.empty_like(query) + block_size = value_cache.shape[2] + head_mapping = torch.arange( + 0, + num_kv_heads, + device="cpu", + dtype=torch.int32, + ).view(num_kv_heads, + 1).repeat_interleave(query.size(1) // num_kv_heads).flatten() + ipex_modules.PagedAttention.single_query_cached_kv_attention( + output, query.contiguous(), key_cache, value_cache, head_mapping, + scale, block_tables, context_lens, block_size, max_context_len, + alibi_slopes) + + return output + + @staticmethod + def forward_prefix( + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + key_cache: torch.Tensor, + value_cache: torch.Tensor, + block_tables: torch.Tensor, + subquery_start_loc: torch.Tensor, + prompt_lens_tensor: torch.Tensor, + context_lens: torch.Tensor, + max_subquery_len: int, + alibi_slopes: Optional[torch.Tensor], + *args, + ) -> torch.Tensor: + raise NotImplementedError + + @staticmethod + def swap_blocks( + src_kv_cache: torch.Tensor, + dst_kv_cache: torch.Tensor, + src_to_dst: Dict[int, int], + *args, + ) -> None: + raise NotImplementedError + + @staticmethod + def copy_blocks( + kv_caches: List[torch.Tensor], + src_to_dists: Dict[int, List[int]], + *args, + ) -> None: + key_caches = [kv_cache[0] for kv_cache in kv_caches] + value_caches = [kv_cache[1] for kv_cache in kv_caches] + ops.copy_blocks(key_caches, value_caches, src_to_dists) diff --git a/vllm/attention/selector.py 
b/vllm/attention/selector.py index 7253483f9a0b8..8b07fb2d768f5 100644 --- a/vllm/attention/selector.py +++ b/vllm/attention/selector.py @@ -7,7 +7,7 @@ import vllm.envs as envs from vllm.attention.backends.abstract import AttentionBackend from vllm.logger import init_logger -from vllm.utils import is_cpu, is_hip +from vllm.utils import is_cpu, is_hip, is_tpu logger = init_logger(__name__) @@ -18,6 +18,7 @@ class _Backend(enum.Enum): ROCM_FLASH = enum.auto() TORCH_SDPA = enum.auto() FLASHINFER = enum.auto() + PALLAS = enum.auto() @lru_cache(maxsize=None) @@ -57,6 +58,9 @@ def get_attn_backend( ROCmFlashAttentionBackend) return ROCmFlashAttentionBackend elif backend == _Backend.TORCH_SDPA: + # TODO: make XPU backend available here. + assert is_cpu(), RuntimeError( + "Torch SDPA backend is only used for the CPU device.") logger.info("Using Torch SDPA backend.") from vllm.attention.backends.torch_sdpa import TorchSDPABackend return TorchSDPABackend @@ -66,6 +70,10 @@ def get_attn_backend( "Please make sure --enforce-eager is set.") from vllm.attention.backends.flashinfer import FlashInferBackend return FlashInferBackend + elif backend == _Backend.PALLAS: + logger.info("Using Pallas backend.") + from vllm.attention.backends.pallas import PallasAttentionBackend + return PallasAttentionBackend else: raise ValueError("Invalid attention backend.") @@ -80,7 +88,6 @@ def which_attn_to_use( block_size: int, ) -> _Backend: """Returns which flash attention backend to use.""" - # Default case. selected_backend = _Backend.FLASH_ATTN @@ -100,6 +107,11 @@ def which_attn_to_use( logger.info("Cannot use %s backend on CPU.", selected_backend) return _Backend.TORCH_SDPA + if is_tpu(): + if selected_backend != _Backend.PALLAS: + logger.info("Cannot use %s backend on TPU.", selected_backend) + return _Backend.PALLAS + if is_hip(): # AMD GPUs. selected_backend = (_Backend.ROCM_FLASH if selected_backend diff --git a/vllm/config.py b/vllm/config.py index 4453d0fc9851f..403959cb79d22 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -11,7 +11,8 @@ from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS from vllm.model_executor.models import ModelRegistry from vllm.transformers_utils.config import get_config, get_hf_text_config -from vllm.utils import get_cpu_memory, is_cpu, is_hip, is_neuron +from vllm.utils import (cuda_device_count_stateless, get_cpu_memory, is_cpu, + is_hip, is_neuron, is_tpu) if TYPE_CHECKING: from ray.util.placement_group import PlacementGroup @@ -244,7 +245,7 @@ def _verify_quantization(self) -> None: f"{self.quantization} quantization is currently not " f"supported in ROCm.") if (self.quantization - not in ["marlin", "gptq_marlin_24", "gptq_marlin"]): + not in ("fp8", "marlin", "gptq_marlin_24", "gptq_marlin")): logger.warning( "%s quantization is not fully " "optimized yet. The speed can be slower than " @@ -637,12 +638,11 @@ def __init__( if self.distributed_executor_backend is None and self.world_size > 1: # We use multiprocessing by default if world_size fits on the # current node and we aren't in a ray placement group. 
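In vllm/config.py the hunk below swaps `torch.cuda.device_count()` for `cuda_device_count_stateless()`, which, as used elsewhere in this diff, is meant to reflect the current `CUDA_VISIBLE_DEVICES` rather than a value cached at import time. The backend decision amounts to the following simplified sketch (an assumption; the real code also considers placement groups):

```python
from vllm.utils import cuda_device_count_stateless


def choose_distributed_backend(world_size: int, ray_found: bool) -> str:
    """Prefer multiprocessing when this node has enough GPUs, else Ray."""
    if cuda_device_count_stateless() < world_size:
        if not ray_found:
            raise ValueError("Unable to load Ray which is required for "
                             "multi-node inference")
        return "ray"
    return "mp"
```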
- from torch.cuda import device_count from vllm.executor import ray_utils backend = "mp" ray_found = ray_utils.ray is not None - if device_count() < self.world_size: + if cuda_device_count_stateless() < self.world_size: if not ray_found: raise ValueError("Unable to load Ray which is " "required for multi-node inference") @@ -780,6 +780,8 @@ def __init__(self, device: str = "auto") -> None: # Automated device type detection if is_neuron(): self.device_type = "neuron" + elif is_tpu(): + self.device_type = "tpu" elif is_cpu(): self.device_type = "cpu" else: @@ -793,6 +795,8 @@ def __init__(self, device: str = "auto") -> None: # Some device types require processing inputs on CPU if self.device_type in ["neuron"]: self.device = torch.device("cpu") + elif self.device_type in ["tpu"]: + self.device = None else: # Set device with device type self.device = torch.device(self.device_type) diff --git a/vllm/core/scheduler.py b/vllm/core/scheduler.py index bb37c5f313617..48c34625c08ae 100644 --- a/vllm/core/scheduler.py +++ b/vllm/core/scheduler.py @@ -50,8 +50,8 @@ class SchedulingBudget: """ token_budget: int max_num_seqs: int - _requeset_ids_num_batched_tokens: Set[str] = field(default_factory=set) - _requeset_ids_num_curr_seqs: Set[str] = field(default_factory=set) + _request_ids_num_batched_tokens: Set[str] = field(default_factory=set) + _request_ids_num_curr_seqs: Set[str] = field(default_factory=set) _num_batched_tokens: int = 0 _num_curr_seqs: int = 0 @@ -65,28 +65,28 @@ def remaining_token_budget(self): return self.token_budget - self.num_batched_tokens def add_num_batched_tokens(self, req_id: str, num_batched_tokens: int): - if req_id in self._requeset_ids_num_batched_tokens: + if req_id in self._request_ids_num_batched_tokens: return - self._requeset_ids_num_batched_tokens.add(req_id) + self._request_ids_num_batched_tokens.add(req_id) self._num_batched_tokens += num_batched_tokens def subtract_num_batched_tokens(self, req_id: str, num_batched_tokens: int): - if req_id in self._requeset_ids_num_batched_tokens: - self._requeset_ids_num_batched_tokens.remove(req_id) + if req_id in self._request_ids_num_batched_tokens: + self._request_ids_num_batched_tokens.remove(req_id) self._num_batched_tokens -= num_batched_tokens def add_num_seqs(self, req_id: str, num_curr_seqs: int): - if req_id in self._requeset_ids_num_curr_seqs: + if req_id in self._request_ids_num_curr_seqs: return - self._requeset_ids_num_curr_seqs.add(req_id) + self._request_ids_num_curr_seqs.add(req_id) self._num_curr_seqs += num_curr_seqs def subtract_num_seqs(self, req_id: str, num_curr_seqs: int): - if req_id in self._requeset_ids_num_curr_seqs: - self._requeset_ids_num_curr_seqs.remove(req_id) + if req_id in self._request_ids_num_curr_seqs: + self._request_ids_num_curr_seqs.remove(req_id) self._num_curr_seqs -= num_curr_seqs @property diff --git a/vllm/distributed/communication_op.py b/vllm/distributed/communication_op.py index 2b38ec472de66..32394a07b00b9 100644 --- a/vllm/distributed/communication_op.py +++ b/vllm/distributed/communication_op.py @@ -1,317 +1,32 @@ -from collections import namedtuple -from contextlib import contextmanager, nullcontext -from dataclasses import dataclass -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Dict, Optional, Union import torch -from torch.distributed import ProcessGroup +import torch.distributed -from .parallel_state import (get_cpu_world_group, get_pp_pynccl_communicator, - get_tensor_model_parallel_group, - get_tensor_model_parallel_rank, - 
get_tensor_model_parallel_world_size, - get_tp_ca_communicator, - get_tp_pynccl_communicator) - - -@dataclass -class GraphCaptureContext: - stream: torch.cuda.Stream - - -@contextmanager -def graph_capture(): - """ - `graph_capture` is a context manager which should surround the code that - is capturing the CUDA graph. Its main purpose is to ensure that the - some operations will be run after the graph is captured, before the graph - is replayed. It returns a `GraphCaptureContext` object which contains the - necessary data for the graph capture. Currently, it only contains the - stream that the graph capture is running on. This stream is set to the - current CUDA stream when the context manager is entered and reset to the - default stream when the context manager is exited. This is to ensure that - the graph capture is running on a separate stream from the default stream, - in order to explicitly distinguish the kernels to capture - from other kernels possibly launched on background in the default stream. - """ - stream = torch.cuda.Stream() - graph_capture_context = GraphCaptureContext(stream) - ca_comm = get_tp_ca_communicator() - maybe_ca_context = nullcontext() if ca_comm is None else ca_comm.capture() - with torch.cuda.stream(stream), maybe_ca_context: - # In graph mode, we have to be very careful about the collective - # operations. The current status is: - # allreduce \ Mode | Eager | Graph | - # -------------------------------------------- - # custom allreduce | enabled | enabled | - # PyNccl | disabled| enabled | - # torch.distributed | enabled | disabled| - # - # Note that custom allreduce will have a runtime check, if the tensor - # size is too large, it will fallback to the next available option. - # In summary: When using CUDA graph, we use - # either custom all-reduce kernel or pynccl. When not using CUDA - # graph, we use either custom all-reduce kernel or PyTorch NCCL. - # We always prioritize using custom all-reduce kernel but fall back - # to PyTorch or pynccl if it is disabled or not supported. - tp_pynccl_comm = get_tp_pynccl_communicator() - pp_pynccl_comm = get_pp_pynccl_communicator() - if not tp_pynccl_comm: - maybe_tp_pynccl_context = nullcontext() - else: - maybe_tp_pynccl_context = tp_pynccl_comm.change_state( - enable=True, stream=torch.cuda.current_stream()) - if not pp_pynccl_comm: - maybe_pp_pynccl_context = nullcontext() - else: - maybe_pp_pynccl_context = pp_pynccl_comm.change_state( - enable=True, stream=torch.cuda.current_stream()) - with maybe_tp_pynccl_context, maybe_pp_pynccl_context: - yield graph_capture_context +from .parallel_state import get_tp_group def tensor_model_parallel_all_reduce(input_: torch.Tensor) -> torch.Tensor: - """All-reduce the input tensor across model parallel group. - - NOTE: This operation will be applied in-place on the input tensor if - disable_custom_all_reduce is set to True. Otherwise, this operation may or - may not be applied in place depending on whether custom all reduce is - invoked for a particular tensor, which further depends on the tensor size - and GPU topology. - - TLDR: always assume this function modifies its input, but use the return - value as the output. - """ - ca_comm = get_tp_ca_communicator() - - # Bypass the function if we are using only 1 GPU. 
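The `graph_capture` context manager deleted above (its logic moves into the group coordinator in this refactor) boils down to running the capture work on a dedicated side stream. A minimal sketch of that pattern, omitting the pynccl and custom-allreduce state handling described in the removed docstring:

```python
from contextlib import contextmanager
from dataclasses import dataclass

import torch


@dataclass
class GraphCaptureContext:
    stream: torch.cuda.Stream


@contextmanager
def graph_capture_sketch():
    # Capture on a dedicated side stream so the captured kernels are kept
    # apart from whatever runs on the default stream.
    stream = torch.cuda.Stream()
    with torch.cuda.stream(stream):
        yield GraphCaptureContext(stream)
```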
- if get_tensor_model_parallel_world_size() == 1: - return input_ - if ca_comm is not None: - out = ca_comm.custom_all_reduce(input_) - if out is not None: - return out - pynccl_comm = get_tp_pynccl_communicator() - if (pynccl_comm is not None and not pynccl_comm.disabled): - pynccl_comm.all_reduce(input_) - else: - torch.distributed.all_reduce(input_, - group=get_tensor_model_parallel_group()) - return input_ + """All-reduce the input tensor across model parallel group.""" + return get_tp_group().all_reduce(input_) def tensor_model_parallel_all_gather(input_: torch.Tensor, dim: int = -1) -> torch.Tensor: """All-gather the input tensor across model parallel group.""" - world_size = get_tensor_model_parallel_world_size() - # Bypass the function if we are using only 1 GPU. - if world_size == 1: - return input_ - assert -input_.dim() <= dim < input_.dim(), ( - f"Invalid dim ({dim}) for input tensor with shape {input_.size()}") - if dim < 0: - # Convert negative dim to positive. - dim += input_.dim() - input_size = input_.size() - # Allocate output tensor. - output_tensor = torch.empty((world_size, ) + input_size, - dtype=input_.dtype, - device=input_.device) - # All-gather. - torch.distributed.all_gather_into_tensor( - output_tensor, input_, group=get_tensor_model_parallel_group()) - # Reshape - output_tensor = output_tensor.movedim(0, dim) - output_tensor = output_tensor.reshape(input_size[:dim] + - (world_size * input_size[dim], ) + - input_size[dim + 1:]) - return output_tensor + return get_tp_group().all_gather(input_, dim) def tensor_model_parallel_gather(input_: torch.Tensor, dst: int = 0, dim: int = -1) -> torch.Tensor: - """Gather the input tensor across model parallel group. - - NOTE: We assume that the input tensor is on the same device across - all the ranks. - """ - world_size = get_tensor_model_parallel_world_size() - # Bypass the function if we are using only 1 GPU. - if world_size == 1: - return input_ - assert -input_.dim() <= dim < input_.dim(), ( - f"Invalid dim ({dim}) for input tensor with shape {input_.size()}") - if dim < 0: - # Convert negative dim to positive. - dim += input_.dim() - # Allocate output tensor. - if get_tensor_model_parallel_rank() == dst: - gather_list = [torch.empty_like(input_) for _ in range(world_size)] - else: - gather_list = None - # Gather. - torch.distributed.gather(input_, - gather_list, - dst=dst, - group=get_tensor_model_parallel_group()) - if get_tensor_model_parallel_rank() == dst: - output_tensor = torch.cat(gather_list, dim=dim) - else: - output_tensor = None - return output_tensor - - -def broadcast(input_: torch.Tensor, - src: int = 0, - group: Optional[ProcessGroup] = None): - """Broadcast the input tensor.""" - group = group or torch.distributed.group.WORLD - ranks = torch.distributed.get_process_group_ranks(group) - assert src in ranks, f"Invalid src rank ({src})" - - # Bypass the function if we are using only 1 GPU. - world_size = torch.distributed.get_world_size(group=group) - if world_size == 1: - return input_ - # Broadcast. 
- torch.distributed.broadcast(input_, src=src, group=group) - return input_ + """Gather the input tensor across model parallel group.""" + return get_tp_group().gather(input_, dst, dim) -def broadcast_object_list(obj_list: List[Any], - src: int = 0, - group: Optional[ProcessGroup] = None): - """Broadcast the input object list.""" - group = group or torch.distributed.group.WORLD - ranks = torch.distributed.get_process_group_ranks(group) - assert src in ranks, f"Invalid src rank ({src})" - - # Bypass the function if we are using only 1 GPU. - world_size = torch.distributed.get_world_size(group=group) - if world_size == 1: - return obj_list - # Broadcast. - torch.distributed.broadcast_object_list(obj_list, src=src, group=group) - return obj_list - - -TensorMetadata = namedtuple("TensorMetadata", ["device", "dtype", "size"]) - - -def _split_tensor_dict( - tensor_dict: Dict[Any, Union[torch.Tensor, Any]] -) -> Tuple[List[Tuple[str, Any]], List[torch.Tensor]]: - """Split the tensor dictionary into two parts: - 1. A list of (key, value) pairs. If the value is a tensor, it is replaced - by its metadata. - 2. A list of tensors. - """ - metadata_list = [] - tensor_list = [] - for key, value in tensor_dict.items(): - if isinstance(value, torch.Tensor): - # Note: we cannot use `value.device` here, - # because it contains not only the device type but also the device - # index (e.g. "cuda:0"). We only need the device type. - # receiving side will set the device index. - device = "cpu" if value.is_cpu else "cuda" - metadata_list.append( - (key, TensorMetadata(device, value.dtype, value.size()))) - tensor_list.append(value) - else: - metadata_list.append((key, value)) - return metadata_list, tensor_list - - -def broadcast_tensor_dict( - tensor_dict: Optional[Dict[Any, Union[torch.Tensor, Any]]] = None, - src: int = 0, - group: Optional[ProcessGroup] = None, - metadata_group: Optional[ProcessGroup] = None -) -> Optional[Dict[Any, Union[torch.Tensor, Any]]]: - """Broadcast the input tensor dictionary. - `group` is used to broadcast the tensors, while `metadata_group` is used - to broadcast the metadata of the dict (e.g. dict structure, tensor sizes, - dtypes). - """ - # Bypass the function if we are using only 1 GPU. - if (not torch.distributed.is_initialized() - or torch.distributed.get_world_size(group=group) == 1): +def broadcast_tensor_dict(tensor_dict: Optional[Dict[Any, Union[torch.Tensor, + Any]]] = None, + src: int = 0): + if not torch.distributed.is_initialized(): return tensor_dict - - group = group or torch.distributed.group.WORLD - metadata_group = metadata_group or get_cpu_world_group() - ranks = torch.distributed.get_process_group_ranks(group) - assert src in ranks, f"Invalid src rank ({src})" - - rank = torch.distributed.get_rank() - if rank == src: - metadata_list: List[Tuple[Any, Any]] = [] - assert isinstance( - tensor_dict, - dict), (f"Expecting a dictionary, got {type(tensor_dict)}") - metadata_list, tensor_list = _split_tensor_dict(tensor_dict) - # `metadata_list` lives in CPU memory. - # `broadcast_object_list` involves serialization and deserialization, - # all happening on CPU. Therefore, we can use the CPU group. - torch.distributed.broadcast_object_list([metadata_list], - src=src, - group=metadata_group) - async_handles = [] - for tensor in tensor_list: - if tensor.numel() == 0: - # Skip broadcasting empty tensors. 
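`broadcast_tensor_dict`, whose open-coded implementation is deleted here in favor of `get_tp_group().broadcast_tensor_dict(...)`, relies on splitting the dict into CPU-side metadata plus a flat list of tensors. A trimmed, self-contained restatement of the removed `_split_tensor_dict` helper:

```python
from collections import namedtuple
from typing import Any, Dict, List, Tuple, Union

import torch

TensorMetadata = namedtuple("TensorMetadata", ["device", "dtype", "size"])


def split_tensor_dict(
    tensor_dict: Dict[Any, Union[torch.Tensor, Any]]
) -> Tuple[List[Tuple[Any, Any]], List[torch.Tensor]]:
    metadata_list: List[Tuple[Any, Any]] = []
    tensor_list: List[torch.Tensor] = []
    for key, value in tensor_dict.items():
        if isinstance(value, torch.Tensor):
            # keep only the device *type*; the receiving rank picks its index
            device = "cpu" if value.is_cpu else "cuda"
            metadata_list.append(
                (key, TensorMetadata(device, value.dtype, value.size())))
            tensor_list.append(value)
        else:
            metadata_list.append((key, value))
    return metadata_list, tensor_list


# e.g. split_tensor_dict({"ids": torch.arange(4), "step": 3}) keeps "step"
# inline in the metadata and returns the "ids" tensor separately.
```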
- continue - if tensor.is_cpu: - # use metadata_group for CPU tensors - handle = torch.distributed.broadcast(tensor, - src=src, - group=metadata_group, - async_op=True) - else: - # use group for GPU tensors - handle = torch.distributed.broadcast(tensor, - src=src, - group=group, - async_op=True) - async_handles.append(handle) - for async_handle in async_handles: - async_handle.wait() - - else: - recv_metadata_list = [None] - torch.distributed.broadcast_object_list(recv_metadata_list, - src=src, - group=metadata_group) - assert recv_metadata_list[0] is not None - tensor_dict = {} - async_handles = [] - for key, value in recv_metadata_list[0]: - if isinstance(value, TensorMetadata): - tensor = torch.empty(value.size, - dtype=value.dtype, - device=value.device) - if tensor.numel() == 0: - # Skip broadcasting empty tensors. - tensor_dict[key] = tensor - continue - if tensor.is_cpu: - # use metadata_group for CPU tensors - handle = torch.distributed.broadcast(tensor, - src=src, - group=metadata_group, - async_op=True) - else: - # use group for GPU tensors - handle = torch.distributed.broadcast(tensor, - src=src, - group=group, - async_op=True) - async_handles.append(handle) - tensor_dict[key] = tensor - else: - tensor_dict[key] = value - for async_handle in async_handles: - async_handle.wait() - return tensor_dict + return get_tp_group().broadcast_tensor_dict(tensor_dict, src) diff --git a/vllm/distributed/device_communicators/custom_all_reduce.py b/vllm/distributed/device_communicators/custom_all_reduce.py index bbc2284f8a364..b0cb21a022789 100644 --- a/vllm/distributed/device_communicators/custom_all_reduce.py +++ b/vllm/distributed/device_communicators/custom_all_reduce.py @@ -9,9 +9,9 @@ from vllm import _custom_ops as ops from vllm.distributed.device_communicators.custom_all_reduce_utils import ( gpu_p2p_access_check) -from vllm.distributed.parallel_state import ( - get_local_rank, get_tensor_model_parallel_cpu_group, is_in_the_same_node) +from vllm.distributed.parallel_state import is_in_the_same_node from vllm.logger import init_logger +from vllm.utils import cuda_device_count_stateless try: import pynvml @@ -86,8 +86,8 @@ class CustomAllreduce: # max_size: max supported allreduce size def __init__(self, - group: Optional[ProcessGroup] = None, - device: Optional[Union[int, str, torch.device]] = None, + group: ProcessGroup, + device: Union[int, str, torch.device], max_size=8192 * 1024) -> None: """ Args: @@ -107,7 +107,6 @@ def __init__(self, # e.g. 
in a non-cuda environment return - group = group or get_tensor_model_parallel_cpu_group() self.group = group assert dist.get_backend(group) != dist.Backend.NCCL, ( @@ -134,10 +133,7 @@ def __init__(self, world_size, str(CustomAllreduce._SUPPORTED_WORLD_SIZES)) return - if device is None: - local_rank = get_local_rank() - device = torch.device(f"cuda:{local_rank}") - elif isinstance(device, int): + if isinstance(device, int): device = torch.device(f"cuda:{device}") elif isinstance(device, str): device = torch.device(device) @@ -149,7 +145,7 @@ def __init__(self, if cuda_visible_devices: device_ids = list(map(int, cuda_visible_devices.split(","))) else: - device_ids = list(range(torch.cuda.device_count())) + device_ids = list(range(cuda_device_count_stateless())) physical_device_id = device_ids[device.index] tensor = torch.tensor([physical_device_id], diff --git a/vllm/distributed/device_communicators/custom_all_reduce_utils.py b/vllm/distributed/device_communicators/custom_all_reduce_utils.py index 4b89a23dfc463..c9573edb08f33 100644 --- a/vllm/distributed/device_communicators/custom_all_reduce_utils.py +++ b/vllm/distributed/device_communicators/custom_all_reduce_utils.py @@ -11,8 +11,8 @@ import torch.multiprocessing as mp import vllm.envs as envs -from vllm.distributed.parallel_state import get_cpu_world_group, get_local_rank from vllm.logger import init_logger +from vllm.utils import cuda_device_count_stateless logger = init_logger(__name__) @@ -153,7 +153,7 @@ def gpu_p2p_access_check(i: int, j: int) -> bool: is_distributed = dist.is_initialized() - num_dev = torch.cuda.device_count() + num_dev = cuda_device_count_stateless() cuda_visible_devices = envs.CUDA_VISIBLE_DEVICES if cuda_visible_devices is None: cuda_visible_devices = ",".join(str(i) for i in range(num_dev)) @@ -162,7 +162,8 @@ def gpu_p2p_access_check(i: int, j: int) -> bool: f"{VLLM_CONFIG_ROOT}/vllm/gpu_p2p_access_cache_for_{cuda_visible_devices}.json" ) os.makedirs(os.path.dirname(path), exist_ok=True) - if ((not is_distributed or get_local_rank() == 0) + from vllm.distributed.parallel_state import get_world_group + if ((not is_distributed or get_world_group().local_rank == 0) and (not os.path.exists(path))): # only the local master process (with local_rank == 0) can # enter this block to calculate the cache @@ -174,8 +175,7 @@ def gpu_p2p_access_check(i: int, j: int) -> bool: with open(path, "w") as f: json.dump(cache, f, indent=4) if is_distributed: - cpu_world_group = get_cpu_world_group() - dist.barrier(cpu_world_group) + get_world_group().barrier() logger.info("reading GPU P2P access cache from %s", path) with open(path, "r") as f: cache = json.load(f) diff --git a/vllm/distributed/device_communicators/pynccl.py b/vllm/distributed/device_communicators/pynccl.py index f5f1de0c71615..83eec264b6f81 100644 --- a/vllm/distributed/device_communicators/pynccl.py +++ b/vllm/distributed/device_communicators/pynccl.py @@ -9,7 +9,6 @@ from vllm.distributed.device_communicators.pynccl_wrapper import ( NCCLLibrary, buffer_type, cudaStream_t, ncclComm_t, ncclDataTypeEnum, ncclRedOpTypeEnum, ncclUniqueId) -from vllm.distributed.parallel_state import get_cpu_world_group, get_local_rank from vllm.logger import init_logger logger = init_logger(__name__) @@ -19,8 +18,8 @@ class PyNcclCommunicator: def __init__( self, - group: Optional[ProcessGroup] = None, - device: Optional[Union[int, str, torch.device]] = None, + group: ProcessGroup, + device: Union[int, str, torch.device], library_path: Optional[str] = None, ): """ @@ -35,7 +34,6 
@@ def __init__( is bind to a unique device. """ assert dist.is_initialized() - group = get_cpu_world_group() if group is None else group assert dist.get_backend(group) != dist.Backend.NCCL, ( "PyNcclCommunicator should be attached to a non-NCCL group.") self.group = group @@ -77,10 +75,7 @@ def __init__( byte_list = tensor.tolist() for i, byte in enumerate(byte_list): self.unique_id.internal[i] = byte - if device is None: - local_rank = get_local_rank() - device = torch.device(f"cuda:{local_rank}") - elif isinstance(device, int): + if isinstance(device, int): device = torch.device(f"cuda:{device}") elif isinstance(device, str): device = torch.device(device) diff --git a/vllm/distributed/parallel_state.py b/vllm/distributed/parallel_state.py index b6d1eeff09786..f6a2fc9b05a84 100644 --- a/vllm/distributed/parallel_state.py +++ b/vllm/distributed/parallel_state.py @@ -2,81 +2,518 @@ # Adapted from # https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/parallel_state.py # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. -"""Tensor and pipeline parallel groups.""" +"""vLLM distributed state. +It takes over the control of the distributed environment from PyTorch. +The typical workflow is: + +- call `init_distributed_environment` to initialize the distributed environment. +- call `initialize_model_parallel` or `ensure_model_parallel_initialized` to + initialize the model parallel groups. + +- any code dealing with the distributed stuff + +- call `destroy_model_parallel` to destroy the model parallel groups. +- call `destroy_distributed_environment` to destroy the distributed environment. + +If you only need to use the distributed environment without model/pipeline + parallelism, you can skip the model parallel initialization and destruction + steps. +""" import contextlib +from collections import namedtuple +from contextlib import contextmanager, nullcontext +from dataclasses import dataclass from multiprocessing import resource_tracker, shared_memory -from typing import List, Optional +from typing import Any, Dict, List, Optional, Tuple, Union import torch -from torch.distributed import ProcessGroup +from torch.distributed import Backend, ProcessGroup import vllm.envs as envs from vllm.logger import init_logger -logger = init_logger(__name__) -_ENABLE_CUSTOM_ALL_REDUCE = True +@dataclass +class GraphCaptureContext: + stream: torch.cuda.Stream -# Tensor model parallel group that the current rank belongs to. -_TP_DEVICE_GROUP: Optional[ProcessGroup] = None -_TP_CPU_GROUP: Optional[ProcessGroup] = None -_TP_PYNCCL_COMMUNICATOR = None -_TP_CA_COMMUNICATOR = None -# Pipeline model parallel group that the current rank belongs to. -_PP_DEVICE_GROUP: Optional[ProcessGroup] = None -_PP_CPU_GROUP: Optional[ProcessGroup] = None -_PP_PYNCCL_COMMUNICATOR = None - -# when people blindly call `torch.distributed.all_reduce` etc, -# it will use this group. It is initialized with the `backend` -# parameter of `init_distributed_environment` below. -# Essentially, this is `torch.distributed.group.WORLD`. -# We leave a line here to note that this is device-specific. -# Note that this variable is not safe to use, because when users -# call `init_distributed_environment` first, and then destroy -# the process group themselves, this variable will keep a reference to the -# destroyed process group, which is not useful. 
-_DEVICE_WORLD_GROUP = None - -# duing `init_distributed_environment`, we will also initialize a -# group with `gloo` backend, to allow direct coordination between -# processes through the CPU. -_CPU_WORLD_GROUP = None - -# In summary, after calling `init_distributed_environment`, we will -# always have two groups: one for device-specific (and is the default) -# and one for CPU. All processes will be part of both groups. - -# A list of global ranks for each pipeline group to ease calculation of the -# source rank when broadcasting from the first or last pipeline stage. -_PP_GLOBAL_RANKS: Optional[List[int]] = None - -_LOCAL_RANK = -1 +TensorMetadata = namedtuple("TensorMetadata", ["device", "dtype", "size"]) -def set_custom_all_reduce(enable: bool): - global _ENABLE_CUSTOM_ALL_REDUCE - _ENABLE_CUSTOM_ALL_REDUCE = enable +def _split_tensor_dict( + tensor_dict: Dict[Any, Union[torch.Tensor, Any]] +) -> Tuple[List[Tuple[str, Any]], List[torch.Tensor]]: + """Split the tensor dictionary into two parts: + 1. A list of (key, value) pairs. If the value is a tensor, it is replaced + by its metadata. + 2. A list of tensors. + """ + metadata_list = [] + tensor_list = [] + for key, value in tensor_dict.items(): + if isinstance(value, torch.Tensor): + # Note: we cannot use `value.device` here, + # because it contains not only the device type but also the device + # index (e.g. "cuda:0"). We only need the device type. + # receiving side will set the device index. + device = "cpu" if value.is_cpu else "cuda" + metadata_list.append( + (key, TensorMetadata(device, value.dtype, value.size()))) + tensor_list.append(value) + else: + metadata_list.append((key, value)) + return metadata_list, tensor_list -def get_pp_pynccl_communicator(): - global _PP_PYNCCL_COMMUNICATOR - return _PP_PYNCCL_COMMUNICATOR +class GroupCoordinator: + """ + PyTorch ProcessGroup wrapper for a group of processes. + PyTorch ProcessGroup is bound to one specific communication backend, + e.g. NCCL, Gloo, MPI, etc. + GroupCoordinator takes charge of all the communication operations among + the processes in the group. It can route the communication to + a specific implementation (e.g. switch allreduce implementation + based on the tensor size and cuda graph mode). 
+ """ -def get_tp_pynccl_communicator(): - global _TP_PYNCCL_COMMUNICATOR - return _TP_PYNCCL_COMMUNICATOR + # available attributes: + rank: int # global rank + ranks: List[int] # global ranks in the group + world_size: int # size of the group + # difference between `local_rank` and `rank_in_group`: + # if we have a group of size 4 across two nodes: + # Process | Node | Rank | Local Rank | Rank in Group + # 0 | 0 | 0 | 0 | 0 + # 1 | 0 | 1 | 1 | 1 + # 2 | 1 | 2 | 0 | 2 + # 3 | 1 | 3 | 1 | 3 + local_rank: int # local rank used to assign devices + rank_in_group: int # rank inside the group + cpu_group: ProcessGroup # group for CPU communication + device_group: ProcessGroup # group for device communication + use_pynccl: bool # a hint of whether to use PyNccl + use_custom_allreduce: bool # a hint of whether to use CustomAllreduce + # communicators are only created for world size > 1 + pynccl_comm: Optional[Any] # PyNccl communicator + ca_comm: Optional[Any] # Custom allreduce communicator + + def __init__( + self, + group_ranks: List[List[int]], + local_rank: int, + torch_distributed_backend: Union[str, Backend], + use_pynccl: bool, + use_custom_allreduce: bool, + ): + + self.rank = torch.distributed.get_rank() + self.local_rank = local_rank + self.device_group = None + self.cpu_group = None + + for ranks in group_ranks: + device_group = torch.distributed.new_group( + ranks, backend=torch_distributed_backend) + # a group with `gloo` backend, to allow direct coordination between + # processes through the CPU. + cpu_group = torch.distributed.new_group(ranks, backend="gloo") + if self.rank in ranks: + self.ranks = ranks + self.world_size = len(ranks) + self.rank_in_group = ranks.index(self.rank) + self.device_group = device_group + self.cpu_group = cpu_group + + assert self.cpu_group is not None + assert self.device_group is not None + if torch.cuda.is_available(): + self.device = torch.device(f"cuda:{local_rank}") + else: + self.device = torch.device("cpu") -def get_tp_ca_communicator(): - global _TP_CA_COMMUNICATOR - return _TP_CA_COMMUNICATOR + self.use_pynccl = use_pynccl + self.use_custom_allreduce = use_custom_allreduce + + # lazy import to avoid documentation build error + from vllm.distributed.device_communicators.custom_all_reduce import ( + CustomAllreduce) + from vllm.distributed.device_communicators.pynccl import ( + PyNcclCommunicator) + + self.pynccl_comm: Optional[PyNcclCommunicator] + if use_pynccl and self.world_size > 1: + self.pynccl_comm = PyNcclCommunicator( + group=self.cpu_group, + device=self.device, + ) + else: + self.pynccl_comm = None + + self.ca_comm: Optional[CustomAllreduce] + if use_custom_allreduce and self.world_size > 1: + # Initialize a custom fast all-reduce implementation. 
+ self.ca_comm = CustomAllreduce( + group=self.cpu_group, + device=self.device, + ) + else: + self.ca_comm = None + + @property + def first_rank(self): + """Return the global rank of the first process in the group""" + return self.ranks[0] + + @property + def last_rank(self): + """Return the global rank of the last process in the group""" + return self.ranks[-1] + + @property + def next_rank(self): + """Return the global rank of the process that follows the caller""" + rank_in_group = self.rank_in_group + world_size = self.world_size + return self.ranks[(rank_in_group + 1) % world_size] + + @property + def prev_rank(self): + """Return the global rank of the process that precedes the caller""" + rank_in_group = self.rank_in_group + world_size = self.world_size + return self.ranks[(rank_in_group - 1) % world_size] + + @contextmanager + def graph_capture( + self, graph_capture_context: Optional[GraphCaptureContext] = None): + if graph_capture_context is None: + stream = torch.cuda.Stream() + graph_capture_context = GraphCaptureContext(stream) + else: + stream = graph_capture_context.stream + + ca_comm = self.ca_comm + maybe_ca_context = nullcontext( + ) if ca_comm is None else ca_comm.capture() + with torch.cuda.stream(stream), maybe_ca_context: + # In graph mode, we have to be very careful about the collective + # operations. The current status is: + # allreduce \ Mode | Eager | Graph | + # -------------------------------------------- + # custom allreduce | enabled | enabled | + # PyNccl | disabled| enabled | + # torch.distributed | enabled | disabled| + # + # Note that custom allreduce will have a runtime check, if the + # tensor size is too large, it will fallback to the next + # available option. + # In summary: When using CUDA graph, we use + # either custom all-reduce kernel or pynccl. When not using + # CUDA graph, we use either custom all-reduce kernel or + # PyTorch NCCL. We always prioritize using custom all-reduce + # kernel but fall back to PyTorch or pynccl if it is + # disabled or not supported. + pynccl_comm = self.pynccl_comm + maybe_pynccl_context: Any + if not pynccl_comm: + maybe_pynccl_context = nullcontext() + else: + maybe_pynccl_context = pynccl_comm.change_state( + enable=True, stream=torch.cuda.current_stream()) + with maybe_pynccl_context: + yield graph_capture_context + + def all_reduce(self, input_: torch.Tensor) -> torch.Tensor: + """ + NOTE: This operation will be applied in-place or out-of-place. + Always assume this function modifies its input, but use the return + value as the output. + """ + ca_comm = self.ca_comm + + # Bypass the function if we are using only 1 GPU. + if self.world_size == 1: + return input_ + if ca_comm is not None: + out = ca_comm.custom_all_reduce(input_) + if out is not None: + return out + pynccl_comm = self.pynccl_comm + if (pynccl_comm is not None and not pynccl_comm.disabled): + pynccl_comm.all_reduce(input_) + else: + torch.distributed.all_reduce(input_, group=self.device_group) + return input_ + + def all_gather(self, input_: torch.Tensor, dim: int = -1) -> torch.Tensor: + world_size = self.world_size + # Bypass the function if we are using only 1 GPU. + if world_size == 1: + return input_ + assert -input_.dim() <= dim < input_.dim(), ( + f"Invalid dim ({dim}) for input tensor with shape {input_.size()}") + if dim < 0: + # Convert negative dim to positive. + dim += input_.dim() + input_size = input_.size() + # Allocate output tensor. 
+ output_tensor = torch.empty((world_size, ) + input_size, + dtype=input_.dtype, + device=input_.device) + # All-gather. + torch.distributed.all_gather_into_tensor(output_tensor, + input_, + group=self.device_group) + # Reshape + output_tensor = output_tensor.movedim(0, dim) + output_tensor = output_tensor.reshape(input_size[:dim] + + (world_size * + input_size[dim], ) + + input_size[dim + 1:]) + return output_tensor + + def gather(self, + input_: torch.Tensor, + dst: int = 0, + dim: int = -1) -> torch.Tensor: + """ + NOTE: We assume that the input tensor is on the same device across + all the ranks. + NOTE: `dst` is the local rank of the destination rank. + """ + world_size = self.world_size + # Bypass the function if we are using only 1 GPU. + if world_size == 1: + return input_ + assert -input_.dim() <= dim < input_.dim(), ( + f"Invalid dim ({dim}) for input tensor with shape {input_.size()}") + if dim < 0: + # Convert negative dim to positive. + dim += input_.dim() + # Allocate output tensor. + if self.rank_in_group == dst: + gather_list = [torch.empty_like(input_) for _ in range(world_size)] + else: + gather_list = None + # Gather. + torch.distributed.gather(input_, + gather_list, + dst=self.ranks[dst], + group=self.device_group) + if self.rank_in_group == dst: + output_tensor = torch.cat(gather_list, dim=dim) + else: + output_tensor = None + return output_tensor + + def broadcast(self, input_: torch.Tensor, src: int = 0): + """Broadcast the input tensor. + NOTE: `src` is the local rank of the source rank. + """ + assert src < self.world_size, f"Invalid src rank ({src})" + + # Bypass the function if we are using only 1 GPU. + if self.world_size == 1: + return input_ + # Broadcast. + torch.distributed.broadcast(input_, + src=self.ranks[src], + group=self.device_group) + return input_ + + def broadcast_object_list(self, + obj_list: List[Any], + src: int = 0, + group: Optional[ProcessGroup] = None): + """Broadcast the input object list. + NOTE: `src` is the local rank of the source rank. + """ + assert src < self.world_size, f"Invalid src rank ({src})" + + # Bypass the function if we are using only 1 GPU. + if self.world_size == 1: + return obj_list + # Broadcast. + torch.distributed.broadcast_object_list(obj_list, + src=self.ranks[src], + group=self.device_group) + return obj_list + + def broadcast_tensor_dict( + self, + tensor_dict: Optional[Dict[Any, Union[torch.Tensor, Any]]] = None, + src: int = 0, + group: Optional[ProcessGroup] = None, + metadata_group: Optional[ProcessGroup] = None + ) -> Optional[Dict[Any, Union[torch.Tensor, Any]]]: + """Broadcast the input tensor dictionary. + NOTE: `src` is the local rank of the source rank. + """ + # Bypass the function if we are using only 1 GPU. + if (not torch.distributed.is_initialized() or self.world_size == 1): + return tensor_dict + + group = self.device_group + metadata_group = self.cpu_group + assert src < self.world_size, f"Invalid src rank ({src})" + src = self.ranks[src] + + rank = self.rank + if rank == src: + metadata_list: List[Tuple[Any, Any]] = [] + assert isinstance( + tensor_dict, + dict), (f"Expecting a dictionary, got {type(tensor_dict)}") + metadata_list, tensor_list = _split_tensor_dict(tensor_dict) + # `metadata_list` lives in CPU memory. + # `broadcast_object_list` has serialization & deserialization, + # all happening on CPU. Therefore, we can use the CPU group. 
+ torch.distributed.broadcast_object_list([metadata_list], + src=src, + group=metadata_group) + async_handles = [] + for tensor in tensor_list: + if tensor.numel() == 0: + # Skip broadcasting empty tensors. + continue + if tensor.is_cpu: + # use metadata_group for CPU tensors + handle = torch.distributed.broadcast(tensor, + src=src, + group=metadata_group, + async_op=True) + else: + # use group for GPU tensors + handle = torch.distributed.broadcast(tensor, + src=src, + group=group, + async_op=True) + async_handles.append(handle) + for async_handle in async_handles: + async_handle.wait() + + else: + recv_metadata_list = [None] + torch.distributed.broadcast_object_list(recv_metadata_list, + src=src, + group=metadata_group) + assert recv_metadata_list[0] is not None + tensor_dict = {} + async_handles = [] + for key, value in recv_metadata_list[0]: + if isinstance(value, TensorMetadata): + tensor = torch.empty(value.size, + dtype=value.dtype, + device=value.device) + if tensor.numel() == 0: + # Skip broadcasting empty tensors. + tensor_dict[key] = tensor + continue + if tensor.is_cpu: + # use metadata_group for CPU tensors + handle = torch.distributed.broadcast( + tensor, + src=src, + group=metadata_group, + async_op=True) + else: + # use group for GPU tensors + handle = torch.distributed.broadcast(tensor, + src=src, + group=group, + async_op=True) + async_handles.append(handle) + tensor_dict[key] = tensor + else: + tensor_dict[key] = value + for async_handle in async_handles: + async_handle.wait() + return tensor_dict + + def barrier(self): + """Barrier synchronization among the group. + NOTE: don't use `device_group` here! `barrier` in NCCL is + terrible because it is internally a broadcast operation with + secretly created GPU tensors. It is easy to mess up the current + device. Use the CPU group instead. + """ + torch.distributed.barrier(group=self.cpu_group) + + def destroy(self): + if self.device_group is not None: + torch.distributed.destroy_process_group(self.device_group) + self.device_group = None + if self.cpu_group is not None: + torch.distributed.destroy_process_group(self.cpu_group) + self.cpu_group = None + if self.pynccl_comm is not None: + self.pynccl_comm = None + if self.ca_comm is not None: + self.ca_comm = None + + +_WORLD: Optional[GroupCoordinator] = None + + +def get_world_group() -> GroupCoordinator: + assert _WORLD is not None, ("world group is not initialized") + return _WORLD + + +_TP: Optional[GroupCoordinator] = None + + +def get_tp_group() -> GroupCoordinator: + assert _TP is not None, ("tensor model parallel group is not initialized") + return _TP + + +# kept for backward compatibility +get_tensor_model_parallel_group = get_tp_group + +_PP: Optional[GroupCoordinator] = None + + +def get_pp_group() -> GroupCoordinator: + assert _PP is not None, ( + "pipeline model parallel group is not initialized") + return _PP -def get_local_rank(): - global _LOCAL_RANK - return _LOCAL_RANK +# kept for backward compatibility +get_pipeline_model_parallel_group = get_pp_group + + +@contextmanager +def graph_capture(): + """ + `graph_capture` is a context manager which should surround the code that + is capturing the CUDA graph. Its main purpose is to ensure that the + some operations will be run after the graph is captured, before the graph + is replayed. It returns a `GraphCaptureContext` object which contains the + necessary data for the graph capture. Currently, it only contains the + stream that the graph capture is running on. 
This stream is set to the + current CUDA stream when the context manager is entered and reset to the + default stream when the context manager is exited. This is to ensure that + the graph capture is running on a separate stream from the default stream, + in order to explicitly distinguish the kernels to capture + from other kernels possibly launched on background in the default stream. + """ + with get_tp_group().graph_capture() as context, get_pp_group( + ).graph_capture(context): + yield context + + +logger = init_logger(__name__) + +_ENABLE_CUSTOM_ALL_REDUCE = True + + +def set_custom_all_reduce(enable: bool): + global _ENABLE_CUSTOM_ALL_REDUCE + _ENABLE_CUSTOM_ALL_REDUCE = enable def init_distributed_environment( @@ -100,31 +537,29 @@ def init_distributed_environment( init_method=distributed_init_method, world_size=world_size, rank=rank) - global _DEVICE_WORLD_GROUP, _CPU_WORLD_GROUP - _DEVICE_WORLD_GROUP = torch.distributed.group.WORLD + # set the local rank + # local_rank is not available in torch ProcessGroup, + # see https://github.com/pytorch/pytorch/issues/122816 + if local_rank == -1: + # local rank not set, this usually happens in single-node + # setting, where we can use rank as local rank + if distributed_init_method == "env://": + local_rank = envs.LOCAL_RANK + else: + local_rank = rank + global _WORLD + if _WORLD is None: ranks = list(range(torch.distributed.get_world_size())) - _CPU_WORLD_GROUP = torch.distributed.new_group(ranks=ranks, - backend="gloo") - # set the local rank - # local_rank is not available in torch ProcessGroup, - # see https://github.com/pytorch/pytorch/issues/122816 - if local_rank == -1: - # local rank not set, this usually happens in single-node - # setting, where we can use rank as local rank - if distributed_init_method == "env://": - local_rank = envs.LOCAL_RANK - else: - local_rank = rank - global _LOCAL_RANK - _LOCAL_RANK = local_rank - # A small all_reduce for warmup. - data = torch.zeros(1) - if torch.cuda.is_available(): - data = data.to(device=f"cuda:{local_rank}") - torch.distributed.all_reduce(data) - if torch.cuda.is_available(): - torch.cuda.synchronize() - del data + _WORLD = GroupCoordinator( + group_ranks=[ranks], + local_rank=local_rank, + torch_distributed_backend=backend, + use_pynccl=False, + use_custom_allreduce=False, + ) + else: + assert _WORLD.world_size == torch.distributed.get_world_size(), ( + "world group already initialized with a different world size") def initialize_model_parallel( @@ -157,8 +592,8 @@ def initialize_model_parallel( # Get world size and rank. Ensure some consistencies. assert torch.distributed.is_initialized() world_size: int = torch.distributed.get_world_size() - # get the backend of _DEVICE_WORLD_GROUP - backend = backend or torch.distributed.get_backend() + backend = backend or torch.distributed.get_backend( + get_world_group().device_group) if (world_size != tensor_model_parallel_size * pipeline_model_parallel_size): @@ -167,63 +602,42 @@ def initialize_model_parallel( f"tensor_model_parallel_size ({tensor_model_parallel_size}) x " f"pipeline_model_parallel_size ({pipeline_model_parallel_size})") + # Build the tensor model-parallel groups. num_tensor_model_parallel_groups: int = (world_size // tensor_model_parallel_size) - num_pipeline_model_parallel_groups: int = (world_size // - pipeline_model_parallel_size) - rank = torch.distributed.get_rank() - - # Build the tensor model-parallel groups. 
- global _TP_DEVICE_GROUP, _TP_CPU_GROUP - global _TP_PYNCCL_COMMUNICATOR, _TP_CA_COMMUNICATOR - assert _TP_DEVICE_GROUP is None, ( - "tensor model parallel group is already initialized") + global _TP + assert _TP is None, ("tensor model parallel group is already initialized") + group_ranks = [] for i in range(num_tensor_model_parallel_groups): ranks = list( range(i * tensor_model_parallel_size, (i + 1) * tensor_model_parallel_size)) - group = torch.distributed.new_group(ranks, backend=backend) - cpu_group = torch.distributed.new_group(ranks, backend="gloo") - if rank in ranks: - _TP_DEVICE_GROUP = group - _TP_CPU_GROUP = cpu_group - - from vllm.distributed.device_communicators.pynccl import PyNcclCommunicator - if tensor_model_parallel_size > 1: - _TP_PYNCCL_COMMUNICATOR = PyNcclCommunicator( - group=_TP_CPU_GROUP, - device=_LOCAL_RANK, - ) - - # Initialize a custom fast all-reduce implementation. - if _ENABLE_CUSTOM_ALL_REDUCE: - from vllm.distributed.device_communicators.custom_all_reduce import ( - CustomAllreduce) - _TP_CA_COMMUNICATOR = CustomAllreduce( - group=_TP_CPU_GROUP, - device=_LOCAL_RANK, - ) + group_ranks.append(ranks) + _TP = GroupCoordinator( + group_ranks=group_ranks, + local_rank=get_world_group().local_rank, + torch_distributed_backend=backend, + use_pynccl=True, + use_custom_allreduce=_ENABLE_CUSTOM_ALL_REDUCE, + ) # Build the pipeline model-parallel groups. - global _PP_DEVICE_GROUP, _PP_CPU_GROUP - global _PP_PYNCCL_COMMUNICATOR - global _PP_GLOBAL_RANKS - assert _PP_DEVICE_GROUP is None, ( + num_pipeline_model_parallel_groups: int = (world_size // + pipeline_model_parallel_size) + global _PP + assert _PP is None, ( "pipeline model parallel group is already initialized") + group_ranks = [] for i in range(num_pipeline_model_parallel_groups): ranks = list(range(i, world_size, num_pipeline_model_parallel_groups)) - group = torch.distributed.new_group(ranks, backend=backend) - cpu_group = torch.distributed.new_group(ranks, backend="gloo") - if rank in ranks: - _PP_DEVICE_GROUP = group - _PP_CPU_GROUP = cpu_group - _PP_GLOBAL_RANKS = ranks - - if pipeline_model_parallel_size > 1: - _PP_PYNCCL_COMMUNICATOR = PyNcclCommunicator( - group=_PP_CPU_GROUP, - device=_LOCAL_RANK, - ) + group_ranks.append(ranks) + _PP = GroupCoordinator( + group_ranks=group_ranks, + local_rank=get_world_group().local_rank, + torch_distributed_backend=backend, + use_pynccl=True, + use_custom_allreduce=_ENABLE_CUSTOM_ALL_REDUCE, + ) def ensure_model_parallel_initialized( @@ -235,8 +649,8 @@ def ensure_model_parallel_initialized( or ensure tensor-parallel and pipeline-parallel sizes are equal to expected values if the model parallel groups are initialized. """ - # get the backend of _DEVICE_WORLD_GROUP - backend = backend or torch.distributed.get_backend() + backend = backend or torch.distributed.get_backend( + get_world_group().device_group) if not model_parallel_is_initialized(): initialize_model_parallel(tensor_model_parallel_size, pipeline_model_parallel_size, backend) @@ -247,137 +661,48 @@ def ensure_model_parallel_initialized( ), ("tensor parallel group already initialized, but of unexpected size: " f"{get_tensor_model_parallel_world_size()=} vs. " f"{tensor_model_parallel_size=}") - assert (get_pipeline_model_parallel_world_size( - ) == pipeline_model_parallel_size), ( + pp_world_size = get_pp_group().world_size + assert (pp_world_size == pipeline_model_parallel_size), ( "pipeline parallel group already initialized, but of unexpected size: " - f"{get_pipeline_model_parallel_world_size()=} vs. 
" + f"{pp_world_size=} vs. " f"{pipeline_model_parallel_size=}") def model_parallel_is_initialized(): """Check if tensor and pipeline parallel groups are initialized.""" - return (_TP_DEVICE_GROUP is not None and _PP_DEVICE_GROUP is not None) - - -def get_cpu_world_group(): - """Get the CPU world group.""" - assert _CPU_WORLD_GROUP is not None, ("CPU world group is not initialized") - return _CPU_WORLD_GROUP - - -def get_tensor_model_parallel_group(): - """Get the tensor model parallel group the caller rank belongs to.""" - assert _TP_DEVICE_GROUP is not None, ( - "tensor model parallel group is not initialized") - return _TP_DEVICE_GROUP - - -def get_tensor_model_parallel_cpu_group(): - """Get the tensor model parallel cpu group the caller rank belongs to.""" - assert _TP_CPU_GROUP is not None, ( - "tensor model parallel cpu group is not initialized") - return _TP_CPU_GROUP - - -def get_pipeline_model_parallel_group(): - """Get the pipeline model parallel group the caller rank belongs to.""" - assert _PP_DEVICE_GROUP is not None, ( - "pipeline model parallel group is not initialized") - return _PP_DEVICE_GROUP - - -def get_pipeline_model_parallel_cpu_group(): - """Get the pipeline model parallel cpu group the caller rank belongs to.""" - assert _PP_CPU_GROUP is not None, ( - "pipeline model parallel cpu group is not initialized") - return _PP_CPU_GROUP + return (_TP is not None and _PP is not None) def get_tensor_model_parallel_world_size(): """Return world size for the tensor model parallel group.""" - return torch.distributed.get_world_size( - group=get_tensor_model_parallel_group()) - - -def get_pipeline_model_parallel_world_size(): - """Return world size for the pipeline model parallel group.""" - return torch.distributed.get_world_size( - group=get_pipeline_model_parallel_group()) + return get_tp_group().world_size def get_tensor_model_parallel_rank(): """Return my rank for the tensor model parallel group.""" - return torch.distributed.get_rank(group=get_tensor_model_parallel_group()) - - -def get_pipeline_model_parallel_rank(): - """Return my rank for the pipeline model parallel group.""" - return torch.distributed.get_rank( - group=get_pipeline_model_parallel_group()) - - -def get_tensor_model_parallel_src_rank(): - """Calculate the global rank corresponding to the first local rank - in the tensor model parallel group.""" - global_rank = torch.distributed.get_rank() - local_world_size = get_tensor_model_parallel_world_size() - return (global_rank // local_world_size) * local_world_size - - -def get_pipeline_model_parallel_first_rank(): - """Return the global rank of the first process in the pipeline for the - current tensor parallel group""" - assert _PP_GLOBAL_RANKS is not None, ( - "Pipeline parallel group is not initialized") - return _PP_GLOBAL_RANKS[0] - - -def get_pipeline_model_parallel_last_rank(): - """Return the global rank of the last process in the pipeline for the - current tensor parallel group""" - assert _PP_GLOBAL_RANKS is not None, ( - "Pipeline parallel group is not initialized") - last_rank_local = get_pipeline_model_parallel_world_size() - 1 - return _PP_GLOBAL_RANKS[last_rank_local] - - -def get_pipeline_model_parallel_next_rank(): - """Return the global rank that follows the caller in the pipeline""" - assert _PP_GLOBAL_RANKS is not None, ( - "Pipeline parallel group is not initialized") - rank_in_pipeline = get_pipeline_model_parallel_rank() - world_size = get_pipeline_model_parallel_world_size() - return _PP_GLOBAL_RANKS[(rank_in_pipeline + 1) % world_size] 
- - -def get_pipeline_model_parallel_prev_rank(): - """Return the global rank that precedes the caller in the pipeline""" - assert _PP_GLOBAL_RANKS is not None, ( - "Pipeline parallel group is not initialized") - rank_in_pipeline = get_pipeline_model_parallel_rank() - world_size = get_pipeline_model_parallel_world_size() - return _PP_GLOBAL_RANKS[(rank_in_pipeline - 1) % world_size] + return get_tp_group().rank_in_group def destroy_model_parallel(): """Set the groups to none and destroy them.""" - global _TP_DEVICE_GROUP - if _TP_DEVICE_GROUP: - torch.distributed.destroy_process_group(_TP_DEVICE_GROUP) - _TP_DEVICE_GROUP = None - global _TP_CPU_GROUP - if _TP_CPU_GROUP: - torch.distributed.destroy_process_group(_TP_CPU_GROUP) - _TP_CPU_GROUP = None - global _TP_PYNCCL_COMMUNICATOR - _TP_PYNCCL_COMMUNICATOR = None - - global _PP_DEVICE_GROUP - if _PP_DEVICE_GROUP: - torch.distributed.destroy_process_group(_PP_DEVICE_GROUP) - _PP_DEVICE_GROUP = None - global _PP_GLOBAL_RANKS - _PP_GLOBAL_RANKS = None + global _TP + if _TP: + _TP.destroy() + _TP = None + + global _PP + if _PP: + _PP.destroy() + _PP = None + + +def destroy_distributed_environment(): + global _WORLD + if _WORLD: + _WORLD.destroy() + _WORLD = None + if torch.distributed.is_initialized(): + torch.distributed.destroy_process_group() def is_in_the_same_node(pg: ProcessGroup): diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index e3276e1a4b58a..67356d11f14a6 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -519,7 +519,7 @@ def add_cli_args( parser.add_argument("--device", type=str, default=EngineArgs.device, - choices=["auto", "cuda", "neuron", "cpu"], + choices=["auto", "cuda", "neuron", "cpu", "tpu"], help='Device type for vLLM execution.') # Related to Vision-language models such as llava diff --git a/vllm/engine/async_llm_engine.py b/vllm/engine/async_llm_engine.py index aa1f07b5bdc24..943402c865bd2 100644 --- a/vllm/engine/async_llm_engine.py +++ b/vllm/engine/async_llm_engine.py @@ -375,6 +375,9 @@ def from_engine_args( if engine_config.device_config.device_type == "neuron": from vllm.executor.neuron_executor import NeuronExecutorAsync executor_class = NeuronExecutorAsync + elif engine_config.device_config.device_type == "tpu": + from vllm.executor.tpu_executor import TPUExecutorAsync + executor_class = TPUExecutorAsync elif engine_config.device_config.device_type == "cpu": assert distributed_executor_backend is None, ( "Distributed execution is not supported with the CPU backend.") diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index a1c77e04a5154..9a86d41de969e 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -6,7 +6,6 @@ from transformers import GenerationConfig, PreTrainedTokenizer -import vllm from vllm.config import (CacheConfig, DecodingConfig, DeviceConfig, LoadConfig, LoRAConfig, ModelConfig, ParallelConfig, SchedulerConfig, SpeculativeConfig, @@ -38,6 +37,7 @@ from vllm.usage.usage_lib import (UsageContext, is_usage_stats_enabled, usage_message) from vllm.utils import Counter +from vllm.version import __version__ as VLLM_VERSION logger = init_logger(__name__) _LOCAL_LOGGING_INTERVAL_SEC = 5 @@ -169,7 +169,7 @@ def __init__( "enforce_eager=%s, kv_cache_dtype=%s, " "quantization_param_path=%s, device_config=%s, " "decoding_config=%r, seed=%d, served_model_name=%s)", - vllm.__version__, + VLLM_VERSION, model_config.model, speculative_config, model_config.tokenizer, @@ -343,6 +343,9 @@ def from_engine_args( if 
engine_config.device_config.device_type == "neuron": from vllm.executor.neuron_executor import NeuronExecutor executor_class = NeuronExecutor + elif engine_config.device_config.device_type == "tpu": + from vllm.executor.tpu_executor import TPUExecutor + executor_class = TPUExecutor elif engine_config.device_config.device_type == "cpu": from vllm.executor.cpu_executor import CPUExecutor executor_class = CPUExecutor diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py index 31dc5557e76f4..9c7a3e701248c 100644 --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -554,11 +554,13 @@ def _run_engine( total=num_requests, desc="Processed prompts", dynamic_ncols=True, - postfix=f"Generation Speed: {0:.2f} toks/s", + postfix=(f"est. speed input: {0:.2f} toks/s, " + f"output: {0:.2f} toks/s"), ) # Run the engine. outputs: List[Union[RequestOutput, EmbeddingRequestOutput]] = [] - total_toks = 0 + total_in_toks = 0 + total_out_toks = 0 while self.llm_engine.has_unfinished_requests(): step_outputs = self.llm_engine.step() for output in step_outputs: @@ -567,10 +569,15 @@ def _run_engine( if use_tqdm: if isinstance(output, RequestOutput): # Calculate tokens only for RequestOutput - total_toks += sum( + total_in_toks += len(output.prompt_token_ids) + in_spd = total_in_toks / pbar.format_dict["elapsed"] + total_out_toks += sum( len(stp.token_ids) for stp in output.outputs) - spd = total_toks / pbar.format_dict["elapsed"] - pbar.postfix = f"Generation Speed: {spd:.2f} toks/s" + out_spd = total_out_toks / pbar.format_dict[ + "elapsed"] + pbar.postfix = ( + f"est. speed input: {in_spd:.2f} toks/s, " + f"output: {out_spd:.2f} toks/s") pbar.update(1) if use_tqdm: pbar.close() diff --git a/vllm/entrypoints/openai/api_server.py b/vllm/entrypoints/openai/api_server.py index e7503b9655830..ea6275920c79d 100644 --- a/vllm/entrypoints/openai/api_server.py +++ b/vllm/entrypoints/openai/api_server.py @@ -15,7 +15,6 @@ from prometheus_client import make_asgi_app from starlette.routing import Mount -import vllm import vllm.envs as envs from vllm.engine.arg_utils import AsyncEngineArgs from vllm.engine.async_llm_engine import AsyncLLMEngine @@ -29,6 +28,7 @@ from vllm.entrypoints.openai.serving_embedding import OpenAIServingEmbedding from vllm.logger import init_logger from vllm.usage.usage_lib import UsageContext +from vllm.version import __version__ as VLLM_VERSION TIMEOUT_KEEP_ALIVE = 5 # seconds @@ -93,7 +93,7 @@ async def show_available_models(): @app.get("/version") async def show_version(): - ver = {"version": vllm.__version__} + ver = {"version": VLLM_VERSION} return JSONResponse(content=ver) @@ -174,7 +174,7 @@ async def authentication(request: Request, call_next): raise ValueError(f"Invalid middleware {middleware}. 
" f"Must be a function or a class.") - logger.info("vLLM API server version %s", vllm.__version__) + logger.info("vLLM API server version %s", VLLM_VERSION) logger.info("args: %s", args) if args.served_model_name is not None: diff --git a/vllm/entrypoints/openai/run_batch.py b/vllm/entrypoints/openai/run_batch.py index 731f4f4a4028a..7a6819c35a92d 100644 --- a/vllm/entrypoints/openai/run_batch.py +++ b/vllm/entrypoints/openai/run_batch.py @@ -5,7 +5,6 @@ import aiohttp -import vllm from vllm.engine.arg_utils import AsyncEngineArgs, nullable_str from vllm.engine.async_llm_engine import AsyncLLMEngine from vllm.entrypoints.openai.protocol import (BatchRequestInput, @@ -15,6 +14,7 @@ from vllm.logger import init_logger from vllm.usage.usage_lib import UsageContext from vllm.utils import random_uuid +from vllm.version import __version__ as VLLM_VERSION logger = init_logger(__name__) @@ -135,7 +135,7 @@ async def main(args): if __name__ == "__main__": args = parse_args() - logger.info("vLLM API server version %s", vllm.__version__) + logger.info("vLLM API server version %s", VLLM_VERSION) logger.info("args: %s", args) asyncio.run(main(args)) diff --git a/vllm/envs.py b/vllm/envs.py index f0513b9af2764..f03b69f4b8866 100644 --- a/vllm/envs.py +++ b/vllm/envs.py @@ -27,6 +27,7 @@ VLLM_TRACE_FUNCTION: int = 0 VLLM_ATTENTION_BACKEND: Optional[str] = None VLLM_CPU_KVCACHE_SPACE: int = 0 + VLLM_XLA_CACHE_PATH: str = "~/.vllm/xla_cache/" VLLM_USE_RAY_COMPILED_DAG: bool = False VLLM_WORKER_MULTIPROC_METHOD: str = "spawn" VLLM_IMAGE_FETCH_TIMEOUT: int = 5 @@ -217,6 +218,11 @@ # Default is 5 seconds "VLLM_IMAGE_FETCH_TIMEOUT": lambda: int(os.getenv("VLLM_IMAGE_FETCH_TIMEOUT", "5")), + + # Path to the XLA persistent cache directory. + # Only used for XLA devices such as TPUs. 
+ "VLLM_XLA_CACHE_PATH": + lambda: os.getenv("VLLM_XLA_CACHE_PATH", "~/.vllm/xla_cache/"), } # end-env-vars-definition diff --git a/vllm/executor/multiproc_gpu_executor.py b/vllm/executor/multiproc_gpu_executor.py index 99c9e52034cc1..8385e56f88b39 100644 --- a/vllm/executor/multiproc_gpu_executor.py +++ b/vllm/executor/multiproc_gpu_executor.py @@ -9,7 +9,8 @@ ResultHandler, WorkerMonitor) from vllm.logger import init_logger from vllm.sequence import ExecuteModelRequest, SamplerOutput -from vllm.utils import (get_distributed_init_method, get_ip, get_open_port, +from vllm.utils import (cuda_device_count_stateless, + get_distributed_init_method, get_ip, get_open_port, get_vllm_instance_id, make_async) logger = init_logger(__name__) @@ -33,8 +34,7 @@ def _init_executor(self) -> None: # Disable torch async compiling which won't work with daemonic processes os.environ["TORCHINDUCTOR_COMPILE_THREADS"] = "1" - from torch.cuda import device_count - assert world_size <= device_count(), ( + assert world_size <= cuda_device_count_stateless(), ( "please set tensor_parallel_size to less than max local gpu count") distributed_init_method = get_distributed_init_method( diff --git a/vllm/executor/tpu_executor.py b/vllm/executor/tpu_executor.py new file mode 100644 index 0000000000000..7061ad85f88c0 --- /dev/null +++ b/vllm/executor/tpu_executor.py @@ -0,0 +1,101 @@ +from typing import List, Set, Tuple + +import torch + +from vllm.executor.executor_base import ExecutorAsyncBase, ExecutorBase +from vllm.logger import init_logger +from vllm.lora.request import LoRARequest +from vllm.sequence import ExecuteModelRequest, SamplerOutput +from vllm.utils import (get_distributed_init_method, get_ip, get_open_port, + make_async) + +logger = init_logger(__name__) + + +class TPUExecutor(ExecutorBase): + + def _init_executor(self) -> None: + assert not self.scheduler_config.chunked_prefill_enabled, ( + "Chunked prefill is not yet supported for TPU backend") + assert not self.speculative_config, ( + "Speculative decoding is not yet supported for TPU backend") + if self.model_config.dtype in (torch.float16, torch.float32): + logger.warning( + "The TPU backend currently does not support %s. " + "Using bfloat16 instead.", self.model_config.dtype) + self.model_config.dtype = torch.bfloat16 + + # Instantiate the worker and load the model to the device. + self._init_worker() + + def _init_worker(self): + from vllm.worker.tpu_worker import TPUWorker + + assert self.parallel_config.world_size == 1, ( + "TPUExecutor currently only supports a single TPU chip.") + distributed_init_method = get_distributed_init_method( + get_ip(), get_open_port()) + self.driver_worker = TPUWorker( + self.model_config, + self.parallel_config, + self.scheduler_config, + self.device_config, + self.cache_config, + self.load_config, + self.vision_language_config, + local_rank=0, + rank=0, + distributed_init_method=distributed_init_method, + ) + self.driver_worker.init_device() + self.driver_worker.load_model() + + def initialize_cache( + self, + num_gpu_blocks: int, + num_cpu_blocks: int, + ) -> None: + """Initialize the KV cache by invoking the underlying worker.""" + # NOTE: This is logged in the executor because there can be >1 worker + # with other executors. We could log in the engine level, but work + # remains to abstract away the device for non-GPU configurations. 
+ logger.info("# TPU blocks: %d, # CPU blocks: %d", num_gpu_blocks, + num_cpu_blocks) + self.driver_worker.initialize_cache(num_gpu_blocks, num_cpu_blocks) + + def determine_num_available_blocks(self) -> Tuple[int, int]: + """Determine the number of available KV blocks by invoking the + underlying worker. + """ + return self.driver_worker.determine_num_available_blocks() + + def execute_model( + self, + execute_model_req: ExecuteModelRequest, + ) -> List[SamplerOutput]: + output = self.driver_worker.execute_model(execute_model_req) + return output + + def add_lora(self, lora_request: LoRARequest) -> bool: + raise NotImplementedError("LoRA is not implemented for TPU backend.") + + def remove_lora(self, lora_id: int) -> bool: + raise NotImplementedError("LoRA is not implemented for TPU backend.") + + def list_loras(self) -> Set[int]: + raise NotImplementedError("LoRA is not implemented for TPU backend.") + + def check_health(self) -> None: + # TPUExecutor will always be healthy as long as it's running. + return + + +class TPUExecutorAsync(TPUExecutor, ExecutorAsyncBase): + + async def execute_model_async( + self, + sexecute_model_req: ExecuteModelRequest, + ) -> SamplerOutput: + output = await make_async(self.driver_worker.execute_model + )(sexecute_model_req) + return output diff --git a/vllm/inputs.py b/vllm/inputs.py index 85c9cd84f5ed5..026903e19a26e 100644 --- a/vllm/inputs.py +++ b/vllm/inputs.py @@ -4,7 +4,7 @@ from typing_extensions import NotRequired if TYPE_CHECKING: - from vllm.sequence import MultiModalData + from vllm.multimodal import MultiModalData class ParsedText(TypedDict): diff --git a/vllm/model_executor/custom_op.py b/vllm/model_executor/custom_op.py index 1d49213cd4ab5..56aa629ae3455 100644 --- a/vllm/model_executor/custom_op.py +++ b/vllm/model_executor/custom_op.py @@ -1,6 +1,6 @@ import torch.nn as nn -from vllm.utils import is_cpu, is_hip +from vllm.utils import is_cpu, is_hip, is_tpu class CustomOp(nn.Module): @@ -56,5 +56,7 @@ def dispatch_forward(self): return self.forward_hip elif is_cpu(): return self.forward_cpu + elif is_tpu(): + return self.forward_tpu else: return self.forward_cuda diff --git a/vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json b/vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json new file mode 100644 index 0000000000000..8cc6c643f236d --- /dev/null +++ b/vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json @@ -0,0 +1,146 @@ +{ + "1": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 5 + }, + "2": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 64, + "num_warps": 8, + "num_stages": 5 + }, + "4": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 5 + }, + "8": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 5 + }, + "16": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 3 + }, + "24": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 2 + }, + "32": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 3 + }, + "48": { + "BLOCK_SIZE_M": 
16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 2 + }, + "64": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 3 + }, + "96": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 3 + }, + "128": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 3 + }, + "256": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 3 + }, + "512": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 3 + }, + "1024": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 3 + }, + "1536": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 3 + }, + "2048": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 3 + }, + "3072": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 3 + }, + "4096": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 3 + } +} diff --git a/vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json b/vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json new file mode 100644 index 0000000000000..d4c9ddd12972a --- /dev/null +++ b/vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json @@ -0,0 +1,146 @@ +{ + "1": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 3 + }, + "2": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 4 + }, + "4": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 3 + }, + "8": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 2 + }, + "16": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 5 + }, + "24": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 2 + }, + "32": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 3 + }, + "48": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 3 + }, + "64": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 3 + }, + "96": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 3 + }, + "128": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 3 + }, + "256": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 64, + 
"GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 3 + }, + "512": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 3 + }, + "1024": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 4 + }, + "1536": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 4 + }, + "2048": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 4 + }, + "3072": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "4096": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 4 + } +} diff --git a/vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json b/vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json new file mode 100644 index 0000000000000..b2799ed3a866e --- /dev/null +++ b/vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json @@ -0,0 +1,146 @@ +{ + "1": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "2": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 5 + }, + "4": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 4 + }, + "8": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 5 + }, + "16": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 2 + }, + "24": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 256, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 2 + }, + "32": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 3 + }, + "48": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 3 + }, + "64": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 3 + }, + "96": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 3 + }, + "128": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 3 + }, + "256": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 3 + }, + "512": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 3 + }, + "1024": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 3 + }, + "1536": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 3 + }, + "2048": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 3 + }, + 
"3072": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 3 + }, + "4096": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 3 + } +} diff --git a/vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json b/vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json new file mode 100644 index 0000000000000..b8d3be2313fa1 --- /dev/null +++ b/vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json @@ -0,0 +1,146 @@ +{ + "1": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 4 + }, + "2": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 4 + }, + "4": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 4 + }, + "8": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 32, + "num_warps": 4, + "num_stages": 3 + }, + "16": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 32, + "num_warps": 8, + "num_stages": 4 + }, + "24": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 64, + "num_warps": 4, + "num_stages": 3 + }, + "32": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 16, + "num_warps": 4, + "num_stages": 2 + }, + "48": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 2 + }, + "64": { + "BLOCK_SIZE_M": 16, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 3 + }, + "96": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 2 + }, + "128": { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 128, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 2 + }, + "256": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 3 + }, + "512": { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 4, + "num_stages": 3 + }, + "1024": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 4 + }, + "1536": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + }, + "2048": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 4 + }, + "3072": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 1, + "num_warps": 8, + "num_stages": 4 + }, + "4096": { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 256, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 16, + "num_warps": 8, + "num_stages": 4 + } +} diff --git a/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py b/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py index d2b0ce0dbbf0b..c7f04784591b2 100644 --- a/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +++ 
b/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py @@ -7,8 +7,8 @@ from vllm.model_executor.layers.quantization.base_config import ( # noqa: E501 QuantizationConfig) from vllm.model_executor.layers.quantization.compressed_tensors.schemes import ( - CompressedTensorsScheme, CompressedTensorsW8A8DynamicToken, - CompressedTensorsW8A8StaticTensor) + CompressedTensorsScheme, CompressedTensorsW4A16, + CompressedTensorsW8A8DynamicToken, CompressedTensorsW8A8StaticTensor) from vllm.model_executor.layers.quantization.compressed_tensors.utils import ( QuantizationArgs, QuantizationStrategy, find_first_name_or_class_match) @@ -47,16 +47,27 @@ def from_config(cls, config: Dict[str, Any]) -> "CompressedTensorsConfig": layer_quant_details: Dict[str, Any] = dict() ignore: List[str] = config.get("ignore", None) + # The quant_config has multiple config_groups, each containing + # an input_activations key with details about how the activations are + # quantized, a weights key indicating how the weights are quantized, + # and a list of targets under the `targets` key, dictating which + # layers are impacted by the quantization details. The quantization + # details follow the structure defined by the QuantizationArgs + # pydantic model, which is used to verify the structure of the + # quant_config and also store the details for later use. for key, quant_config in config["config_groups"].items(): targets = quant_config.get("targets") for target in targets: layer_quant_details[target] = {} layer_quant_details[target][ - "weight"] = QuantizationArgs.parse_obj( + "weights"] = QuantizationArgs.parse_obj( quant_config.get("weights")) - layer_quant_details[target][ - "input"] = QuantizationArgs.parse_obj( - quant_config.get("input_activations")) + try: + layer_quant_details[target][ + "input_activations"] = QuantizationArgs.parse_obj( + quant_config.get("input_activations")) + except Exception: + layer_quant_details[target]["input_activations"] = None return cls(layer_quant_details=layer_quant_details, ignore=ignore) @@ -86,8 +97,23 @@ def _is_dynamic_token_w8a8(self, weight_quant: BaseModel, return is_8_bits and is_token_tensor and is_symmetric and is_dynamic + def _is_w4a16(self, weight_quant: BaseModel, + input_quant: BaseModel) -> bool: + input_quant_none = input_quant is None + is_4_bits = weight_quant.num_bits == 4 + is_symmetric = weight_quant.symmetric + is_static = not weight_quant.dynamic + + return is_4_bits and input_quant_none and is_symmetric and is_static + def _get_schema(self, weight_quant: BaseModel, input_quant: BaseModel) -> "CompressedTensorsScheme": + + if self._is_w4a16(weight_quant, input_quant): + return CompressedTensorsW4A16(num_bits=weight_quant.num_bits, + strategy=weight_quant.strategy, + group_size=weight_quant.group_size) + if self._is_static_tensor_w8a8(weight_quant, input_quant): return CompressedTensorsW8A8StaticTensor() @@ -113,8 +139,9 @@ def get_scheme(self, layer: torch.nn.Module) -> "CompressedTensorsScheme": raise ValueError( f"Could not find quantization details for {layer}.") - return self._get_schema(weight_quant=layer_quant_details["weight"], - input_quant=layer_quant_details["input"]) + return self._get_schema( + weight_quant=layer_quant_details["weights"], + input_quant=layer_quant_details["input_activations"]) class CompressedTensorsLinearMethod(LinearMethodBase): @@ -140,6 +167,7 @@ def create_weights(self, layer: torch.nn.Module, layer=layer, input_size_per_partition=input_size_per_partition, 
output_partition_sizes=output_partition_sizes, + input_size=input_size, output_size=output_size, params_dtype=params_dtype, weight_loader=weight_loader) diff --git a/vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py b/vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py index 9a910f061f580..dc84d000803f9 100644 --- a/vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +++ b/vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py @@ -1,6 +1,7 @@ from .compressed_tensors_scheme import CompressedTensorsScheme # noqa: F401 from .compressed_tensors_unquantized import ( # noqa: F401 CompressedTensorsUnquantized) +from .compressed_tensors_w4a16 import CompressedTensorsW4A16 # noqa: F401 from .compressed_tensors_w8a8_dynamictoken import ( # noqa: F401, E501 CompressedTensorsW8A8DynamicToken) from .compressed_tensors_w8a8_statictensor import ( # noqa: F401, E501 diff --git a/vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16.py b/vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16.py new file mode 100644 index 0000000000000..90446a5ffae01 --- /dev/null +++ b/vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16.py @@ -0,0 +1,168 @@ +from typing import Callable, List, Optional + +import torch +from torch.nn import Parameter + +from vllm import _custom_ops as ops +from vllm.model_executor.layers.quantization.compressed_tensors.schemes import ( + CompressedTensorsScheme) +from vllm.model_executor.layers.quantization.gptq_marlin import ( + GPTQ_MARLIN_MAX_PARALLEL, GPTQ_MARLIN_MIN_THREAD_N, GPTQMarlinState, + marlin_permute_scales) +from vllm.model_executor.utils import set_weight_attrs + +__all__ = ["CompressedTensorsW4A16"] + + +class CompressedTensorsW4A16(CompressedTensorsScheme): + + def __init__(self, + strategy: str, + num_bits: int, + group_size: Optional[int] = None): + self.num_bits = num_bits + self.strategy = strategy + self.group_size = group_size + + if self.strategy == "group" and self.group_size is None: + raise ValueError( + "group_size must be given when using strategy group") + + def create_weights(self, layer: torch.nn.Module, input_size: int, + output_partition_sizes: List[int], + input_size_per_partition: int, + params_dtype: torch.dtype, weight_loader: Callable, + **kwargs): + + pack_factor = 32 // self.num_bits + output_size_per_partition = sum(output_partition_sizes) + + if self.group_size is not None: + group_size = self.group_size + else: + group_size = input_size + + weight_scale_dim = None + scales_and_zp_size = input_size // group_size + + if (input_size != input_size_per_partition + and self.group_size is not None): + weight_scale_dim = 1 + scales_and_zp_size = input_size_per_partition // group_size + + weight = Parameter( + torch.empty( + output_size_per_partition, + input_size_per_partition // pack_factor, + dtype=torch.int32, + ), + requires_grad=False, + ) + + set_weight_attrs( + weight, { + "input_dim": 1, + "output_dim": 0, + "packed_dim": 1, + "pack_factor": pack_factor + }) + set_weight_attrs(weight, {"weight_loader": weight_loader}) + + layer.register_parameter("weight_packed", weight) + + weight_scale = Parameter( + torch.empty( + output_size_per_partition, + scales_and_zp_size, + dtype=params_dtype, + ), + requires_grad=False, + ) + + set_weight_attrs(weight_scale, {"weight_loader": weight_loader}) + set_weight_attrs(weight_scale, { + 
"input_dim": weight_scale_dim, + "output_dim": 0 + }) + layer.register_parameter("weight_scale", weight_scale) + + # A 2D array defining the original shape of the weights + # before packing + weight_shape = Parameter(torch.empty(2, dtype=torch.int64), + requires_grad=False) + + layer.register_parameter("weight_shape", weight_shape) + set_weight_attrs(weight_shape, {"weight_loader": weight_loader}) + + layer.input_size_per_partition = input_size_per_partition + layer.output_size_per_partition = output_size_per_partition + + layer.input_size = input_size + layer.marlin_state = GPTQMarlinState.REPACK + layer.is_k_full = True + layer.group_size = group_size + + max_workspace_size = ( + output_size_per_partition // + GPTQ_MARLIN_MIN_THREAD_N) * GPTQ_MARLIN_MAX_PARALLEL + + workspace = torch.zeros(max_workspace_size, + dtype=torch.int, + requires_grad=False) + layer.workspace = workspace + + def apply_weights(self, layer: torch.nn.Module, x: torch.Tensor): + reshaped_x = x.reshape(-1, x.shape[-1]) + + size_m = reshaped_x.shape[0] + part_size_n = layer.output_size_per_partition + part_size_k = layer.input_size_per_partition + + out_shape = x.shape[:-1] + (part_size_n, ) + + if layer.marlin_state == GPTQMarlinState.REPACK: + layer.marlin_state = GPTQMarlinState.READY + + # Newly generated tensors need to replace existing tensors that are + # already registered as parameters by vLLM (and won't be freed) + def replace_tensor(name, new_t): + # It is important to use resize_() here since it ensures + # the same buffer is reused + getattr(layer, name).resize_(new_t.shape) + getattr(layer, name).copy_(new_t) + del new_t + + cur_device = layer.weight_packed.device + + # Reset g_idx related tensors + layer.g_idx = Parameter(torch.empty(0, + dtype=torch.int, + device=cur_device), + requires_grad=False) + layer.g_idx_sort_indices = Parameter(torch.empty( + 0, dtype=torch.int, device=cur_device), + requires_grad=False) + + # Repack weights + marlin_qweight = ops.gptq_marlin_repack( + layer.weight_packed.t().contiguous(), layer.g_idx_sort_indices, + part_size_k, part_size_n, self.num_bits) + + replace_tensor("weight_packed", marlin_qweight) + + # Permute scales + scales_size_k = part_size_k + scales_size_n = part_size_n + + marlin_scales = marlin_permute_scales( + layer.weight_scale.squeeze().t().contiguous(), scales_size_k, + scales_size_n, layer.group_size, self.num_bits) + replace_tensor("weight_scale", marlin_scales) + + output = ops.gptq_marlin_gemm(reshaped_x, layer.weight_packed, + layer.weight_scale, layer.g_idx, + layer.g_idx_sort_indices, + layer.workspace, self.num_bits, size_m, + part_size_n, part_size_k, + layer.is_k_full) + return output.reshape(out_shape) diff --git a/vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_dynamictoken.py b/vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_dynamictoken.py index 25b707caeef33..9bb7bf4470872 100644 --- a/vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_dynamictoken.py +++ b/vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_dynamictoken.py @@ -81,5 +81,5 @@ def apply_weights(self, layer: torch.nn.Module, x: torch.Tensor): weight_scale = layer.weight_scale x_q, input_scales = custom_ops.scaled_int8_quant(x) - return custom_ops.cutlass_scaled_mm_dq(x_q, weight.t(), input_scales, - weight_scale, x.dtype) + return custom_ops.cutlass_scaled_mm(x_q, weight.t(), input_scales, + weight_scale, 
x.dtype) diff --git a/vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_statictensor.py b/vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_statictensor.py index 7559fc0f95b24..88c15c5c26a11 100644 --- a/vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_statictensor.py +++ b/vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_statictensor.py @@ -99,5 +99,5 @@ def apply_weights(self, layer: torch.nn.Module, x: torch.Tensor): # Input quantize x_q, _ = custom_ops.scaled_int8_quant(x, act_scale) - return custom_ops.cutlass_scaled_mm_dq(x_q, weight.t(), act_scale, - weight_scale, x.dtype) + return custom_ops.cutlass_scaled_mm(x_q, weight.t(), act_scale, + weight_scale, x.dtype) diff --git a/vllm/model_executor/layers/quantization/fp8.py b/vllm/model_executor/layers/quantization/fp8.py index 0cf2bd927a800..bc08bfcc32b3a 100644 --- a/vllm/model_executor/layers/quantization/fp8.py +++ b/vllm/model_executor/layers/quantization/fp8.py @@ -257,11 +257,13 @@ def apply(self, # If dynamic, layer.input_scale is None and x_scale computed from x. # If static, layer.input_scale is scalar and x_scale is input_scale. - if bias is None and self.cutlass_fp8_supported: + # Temporarily disable CUTLASS kernels due to an illegal memory access + #if bias is None and self.cutlass_fp8_supported: + if False: qinput, x_scale = ops.scaled_fp8_quant(x, layer.input_scale) # Fused GEMM_DQ - output = ops.cutlass_scaled_mm_dq( + output = ops.cutlass_scaled_mm( qinput, layer.weight, out_dtype=x.dtype, diff --git a/vllm/model_executor/layers/rotary_embedding.py b/vllm/model_executor/layers/rotary_embedding.py index d2652106b8441..792c4729355a7 100644 --- a/vllm/model_executor/layers/rotary_embedding.py +++ b/vllm/model_executor/layers/rotary_embedding.py @@ -28,6 +28,7 @@ import torch.nn as nn from vllm.model_executor.custom_op import CustomOp +from vllm.utils import is_tpu def _rotate_neox(x: torch.Tensor) -> torch.Tensor: @@ -43,6 +44,19 @@ def _rotate_gptj(x: torch.Tensor) -> torch.Tensor: return x.flatten(-2) +def _apply_rotary_emb( + x: torch.Tensor, + freqs_cis: torch.Tensor, +) -> torch.Tensor: + x_ = torch.view_as_complex( + torch.stack(torch.chunk(x.transpose(1, 2).float(), 2, dim=-1), dim=-1)) + x_out = torch.view_as_real(x_ * freqs_cis).type_as(x) + x_out = torch.cat(torch.chunk(x_out, 2, dim=-1), dim=-2) + x_out = x_out.reshape(x_out.shape[0], x_out.shape[1], x_out.shape[2], + -1).transpose(1, 2) + return x_out + + class RotaryEmbedding(CustomOp): """Original rotary positional embedding.""" @@ -64,8 +78,14 @@ def __init__( self.dtype = dtype cache = self._compute_cos_sin_cache() - cache = cache.to(dtype) - self.register_buffer("cos_sin_cache", cache, persistent=False) + self.use_native2 = is_tpu() and is_neox_style + if not self.use_native2: + cache = cache.to(dtype) + self.register_buffer("cos_sin_cache", cache, persistent=False) + else: + cos, sin = cache.chunk(2, dim=-1) + freqs_cis = cos + 1j * sin + self.register_buffer("freqs_cis", freqs_cis, persistent=False) def _compute_inv_freq(self, base: Union[int, float]) -> torch.Tensor: """Compute the inverse frequency.""" @@ -100,7 +120,11 @@ def forward_native( key: torch.Tensor, offsets: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: - """PyTorch-native implementation equivalent to forward().""" + """A PyTorch-native implementation equivalent to forward(). 
+ + This method mimics the implementation of the custom CUDA kernel + used in `forward_cuda()`. + """ query = query.view(*query.shape[:-1], -1, self.head_size) key = key.view(*key.shape[:-1], -1, self.head_size) @@ -138,6 +162,42 @@ def forward_native( key = key.flatten(-2) return query, key + def forward_native2( + self, + positions: torch.Tensor, + query: torch.Tensor, + key: torch.Tensor, + offsets: Optional[torch.Tensor] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Another PyTorch-native implementation of forward(). + + This method might perform better than `forward_native()` when compiled. + """ + if positions.dim() == 1: + batch_size = 1 + seq_len = positions.shape[0] + else: + batch_size, seq_len = positions.shape + if offsets is not None: + positions = positions + offsets + freqs_cis = self.freqs_cis.index_select(0, positions.flatten()) + freqs_cis = freqs_cis.view(batch_size, 1, seq_len, -1) + + query_shape = query.shape + query = query.view(batch_size, seq_len, -1, self.head_size) + query_rot = query[..., :self.rotary_dim] + query_pass = query[..., self.rotary_dim:] + query_rot = _apply_rotary_emb(query_rot, freqs_cis) + query = torch.cat((query_rot, query_pass), dim=-1).reshape(query_shape) + + key_shape = key.shape + key = key.view(batch_size, seq_len, -1, self.head_size) + key_rot = key[..., :self.rotary_dim] + key_pass = key[..., self.rotary_dim:] + key_rot = _apply_rotary_emb(key_rot, freqs_cis) + key = torch.cat((key_rot, key_pass), dim=-1).reshape(key_shape) + return query, key + def forward_cuda( self, positions: torch.Tensor, @@ -161,6 +221,17 @@ def forward_cuda( self.cos_sin_cache, self.is_neox_style) return query, key + def forward_tpu( + self, + positions: torch.Tensor, + query: torch.Tensor, + key: torch.Tensor, + offsets: Optional[torch.Tensor] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + forward_fn = (self.forward_native2 + if self.use_native2 else self.forward_native) + return forward_fn(positions, query, key, offsets) + def extra_repr(self) -> str: s = f"head_size={self.head_size}, rotary_dim={self.rotary_dim}" s += f", max_position_embeddings={self.max_position_embeddings}" diff --git a/vllm/model_executor/model_loader/loader.py b/vllm/model_executor/model_loader/loader.py index 491a8ccc4f7e2..11f5758797916 100644 --- a/vllm/model_executor/model_loader/loader.py +++ b/vllm/model_executor/model_loader/loader.py @@ -24,7 +24,7 @@ QuantizationConfig) from vllm.model_executor.model_loader.tensorizer import ( TensorizerConfig, is_vllm_tensorized, load_with_tensorizer, - tensorizer_weights_iterator) + serialize_vllm_model, tensorizer_weights_iterator) from vllm.model_executor.model_loader.utils import (get_model_architecture, set_default_torch_dtype) # UPSTREAM SYNC: needed for sparsity @@ -36,6 +36,7 @@ safetensors_weights_iterator) from vllm.model_executor.models.vlm_base import VisionLanguageModelBase from vllm.model_executor.utils import set_weight_attrs +from vllm.utils import is_tpu logger = init_logger(__name__) @@ -248,12 +249,26 @@ def _get_weights_iterator( if self.load_config.load_format == LoadFormat.NPCACHE: # Currently np_cache only support *.bin checkpoints assert use_safetensors is False - return np_cache_weights_iterator(model_name_or_path, - self.load_config.download_dir, - hf_folder, hf_weights_files) - if use_safetensors: - return safetensors_weights_iterator(hf_weights_files) - return pt_weights_iterator(hf_weights_files) + weights_iterator = np_cache_weights_iterator( + model_name_or_path, self.load_config.download_dir, 
hf_folder, + hf_weights_files) + elif use_safetensors: + weights_iterator = safetensors_weights_iterator(hf_weights_files) + else: + weights_iterator = pt_weights_iterator(hf_weights_files) + + if is_tpu(): + # In PyTorch XLA, we should call `xm.mark_step` frequently so that + # not too many ops are accumulated in the XLA program. + import torch_xla.core.xla_model as xm + + def _xla_weights_iterator(iterator: Generator): + for weights in iterator: + yield weights + xm.mark_step() + + weights_iterator = _xla_weights_iterator(weights_iterator) + return weights_iterator def load_model(self, *, model_config: ModelConfig, device_config: DeviceConfig, @@ -398,6 +413,12 @@ def load_model(self, *, model_config: ModelConfig, cache_config: CacheConfig) -> nn.Module: self._verify_config(model_config, parallel_config) + if parallel_config.tensor_parallel_size > 1: + from vllm.distributed import get_tensor_model_parallel_rank + self.tensorizer_config.tensorizer_uri = \ + self.tensorizer_config.tensorizer_uri \ + % get_tensor_model_parallel_rank() + if is_vllm_tensorized(self.tensorizer_config): return self._load_model_serialized(model_config, device_config, lora_config, @@ -408,6 +429,16 @@ def load_model(self, *, model_config: ModelConfig, vision_language_config, cache_config) + @staticmethod + def save_model( + model: torch.nn.Module, + tensorizer_config: TensorizerConfig, + ) -> None: + serialize_vllm_model( + model=model, + tensorizer_config=tensorizer_config, + ) + class ShardedStateLoader(BaseModelLoader): """ diff --git a/vllm/model_executor/model_loader/tensorizer.py b/vllm/model_executor/model_loader/tensorizer.py index 2cf4ce5f88521..d79fedaea428e 100644 --- a/vllm/model_executor/model_loader/tensorizer.py +++ b/vllm/model_executor/model_loader/tensorizer.py @@ -2,11 +2,11 @@ import dataclasses import io import os +import re import time -import typing from dataclasses import dataclass from functools import partial -from typing import Generator, Optional, Tuple, Type, Union +from typing import BinaryIO, Generator, Optional, Tuple, Type, Union import torch from torch import nn @@ -14,6 +14,7 @@ import vllm.envs as envs from vllm.config import ModelConfig, ParallelConfig +from vllm.engine.arg_utils import EngineArgs from vllm.engine.llm_engine import LLMEngine from vllm.logger import init_logger from vllm.model_executor.layers.quantization.base_config import ( @@ -48,8 +49,7 @@ @dataclass class TensorizerConfig: - tensorizer_uri: Union[io.BufferedIOBase, io.RawIOBase, typing.BinaryIO, - str, bytes, os.PathLike, int] + tensorizer_uri: str vllm_tensorized: Optional[bool] = False verify_hash: Optional[bool] = False num_readers: Optional[int] = None @@ -60,6 +60,12 @@ class TensorizerConfig: model_class: Optional[Type[torch.nn.Module]] = None hf_config: Optional[PretrainedConfig] = None dtype: Optional[Union[str, torch.dtype]] = None + _is_sharded: bool = False + + def __post_init__(self): + # check if the configuration is for a sharded vLLM model + self._is_sharded = isinstance(self.tensorizer_uri, str) \ + and re.search(r'%0\dd', self.tensorizer_uri) is not None def _construct_tensorizer_args(self) -> "TensorizerArgs": tensorizer_args = { @@ -78,13 +84,12 @@ def verify_with_parallel_config( self, parallel_config: "ParallelConfig", ) -> None: - if (parallel_config.tensor_parallel_size > 1 - and self.tensorizer_uri is not None): + if parallel_config.tensor_parallel_size > 1 \ + and not self._is_sharded: raise ValueError( - "Loading to multiple GPUs is not currently supported with " - 
"vLLM-serialized models. Please set tensor_parallel_size=1." - " or use a non-vLLM-serialized model, such as a " - "serialized Hugging Face `PretrainedModel`.") + "For a sharded model, tensorizer_uri should include a" + " string format template like '%04d' to be formatted" + " with the rank of the shard") def verify_with_model_config(self, model_config: "ModelConfig") -> None: if (model_config.quantization is not None @@ -102,8 +107,8 @@ def load_with_tensorizer(tensorizer_config: TensorizerConfig, @dataclass class TensorizerArgs: - tensorizer_uri: Union[io.BufferedIOBase, io.RawIOBase, typing.BinaryIO, - str, bytes, os.PathLike, int] + tensorizer_uri: Union[io.BufferedIOBase, io.RawIOBase, BinaryIO, str, + bytes, os.PathLike, int] vllm_tensorized: Optional[bool] = False verify_hash: Optional[bool] = False num_readers: Optional[int] = None @@ -332,6 +337,7 @@ def deserialize(self): ) as stream, TensorDeserializer( stream, dtype=self.tensorizer_config.dtype, + device=f'cuda:{torch.cuda.current_device()}', **self.tensorizer_args.deserializer_params) as deserializer: deserializer.load_into_module(self.model) end = time.perf_counter() @@ -400,33 +406,70 @@ def is_vllm_tensorized(tensorizer_config: "TensorizerConfig") -> bool: return False -def get_pretensorized_vllm_model(engine: "LLMEngine") -> nn.Module: - model = (engine.model_executor.driver_worker.model_runner.model) +def serialize_vllm_model( + model: nn.Module, + tensorizer_config: TensorizerConfig, +) -> nn.Module: model.register_parameter( "vllm_tensorized_marker", nn.Parameter(torch.tensor((1, ), device="meta"), requires_grad=False)) - return model - - -def serialize_vllm_model(engine: "LLMEngine", - tensorizer_config : TensorizerConfig, - encryption_key_path: Optional[str] = None) \ - -> nn.Module: - - model = get_pretensorized_vllm_model(engine) tensorizer_args = tensorizer_config._construct_tensorizer_args() + encryption_params = None - if encryption_key_path is not None: - encryption_params = EncryptionParams.random() - with _write_stream(encryption_key_path, - **tensorizer_args.stream_params) as stream: - stream.write(encryption_params.key) + if (keyfile := tensorizer_config.encryption_keyfile) is not None: + with open(keyfile, "rb") as f: + key = f.read() + encryption_params = EncryptionParams(key=key) - with _write_stream(tensorizer_args.tensorizer_uri, - **tensorizer_args.stream_params) as stream: + output_file = tensorizer_args.tensorizer_uri + if tensorizer_config._is_sharded: + from vllm.distributed import get_tensor_model_parallel_rank + output_file = output_file % get_tensor_model_parallel_rank() + + with _write_stream(output_file, **tensorizer_args.stream_params) as stream: serializer = TensorSerializer(stream, encryption=encryption_params) serializer.write_module(model) serializer.close() - logger.info("Successfully serialized model to %s", - str(tensorizer_args.tensorizer_uri)) + logger.info("Successfully serialized model to %s", str(output_file)) return model + + +def tensorize_vllm_model(engine_args: EngineArgs, + tensorizer_config: TensorizerConfig, + generate_keyfile: bool = True): + """Utility to load a model and then serialize it with Tensorizer + + Intended to be used separately from running a vLLM server since it + creates its own Engine instance. 
+ """ + engine_config = engine_args.create_engine_config() + tensorizer_config.verify_with_model_config(engine_config.model_config) + tensorizer_config.verify_with_parallel_config( + engine_config.parallel_config) + + # generate the encryption key before creating the engine to support sharding + if generate_keyfile and (keyfile := + tensorizer_config.encryption_keyfile) is not None: + encryption_params = EncryptionParams.random() + with _write_stream( + keyfile, + s3_access_key_id=tensorizer_config.s3_access_key_id, + s3_secret_access_key=tensorizer_config.s3_secret_access_key, + s3_endpoint=tensorizer_config.s3_endpoint, + ) as stream: + stream.write(encryption_params.key) + + engine = LLMEngine.from_engine_args(engine_args) + if tensorizer_config._is_sharded: + # if the engine is a distributed engine (for tensor parallel) then each + # worker shard needs to serialize its part of the model. + engine.model_executor._run_workers( + "save_tensorized_model", + tensorizer_config=tensorizer_config, + ) + else: + # with a single worker, we can get to the underlying model directly + serialize_vllm_model( + engine.model_executor.driver_worker.model_runner.model, + tensorizer_config, + ) diff --git a/vllm/model_executor/models/llava.py b/vllm/model_executor/models/llava.py index 67b32a08833b6..39355b9d3ab44 100644 --- a/vllm/model_executor/models/llava.py +++ b/vllm/model_executor/models/llava.py @@ -227,7 +227,7 @@ def forward( attn_metadata: AttentionMetadata, **kwargs: object, ) -> SamplerOutput: - """Run forward pass for Llava 1.5. + """Run forward pass for LLaVA-1.5. One key thing to understand is the `input_ids` already accounts for the positions of the to-be-inserted image embeddings. @@ -247,22 +247,25 @@ def forward( This way, the `positions` and `attn_metadata` are consistent with the `input_ids`. - The model takes two types of image inputs: - PIXEL_VALUES and IMAGE_FEATURES. - The following shows how each maps to huggingface implementation. - PIXEL_VALUES: - - https://github.com/huggingface/transformers/blob/07bdbeb/src/transformers/models/llava/modeling_llava.py#L353 - IMAGE_FEATURES: - - https://github.com/huggingface/transformers/blob/07bdbeb/src/transformers/models/llava/modeling_llava.py#L430 - before going through the multi modal projector. + This model has two modes of image inputs: + `PIXEL_VALUES` and `IMAGE_FEATURES`. Args: input_ids: Flattened (concatenated) input_ids corresponding to a batch. - pixel_values: For PIXEL_VALUES, expects a batch with shape - [1, 3, 336, 336]. - image_features: For IMAGE_FEATURES, expects a batch with shape - [1, 576, 1024]. + pixel_values: The pixels in each input image. + Expects a batch with shape `[1, 3, 336, 336]`. + (Only applicable to `PIXEL_VALUES` mode) + image_features: The image features for each input image outputted by + the vision tower before passing to the multi-modal projector. + Expects a batch with shape `[1, 576, 1024]`. 
+ (Only applicable to `IMAGE_FEATURES` mode) + + See also: + Each input maps to huggingface implementation, as follows: + + - `pixel_values`: https://github.com/huggingface/transformers/blob/v4.41.1/src/transformers/models/llava/modeling_llava.py#L360 + - `image_features`: https://github.com/huggingface/transformers/blob/v4.41.1/src/transformers/models/llava/modeling_llava.py#L437 """ image_input = self._parse_and_validate_image_input(**kwargs) diff --git a/vllm/model_executor/models/llava_next.py b/vllm/model_executor/models/llava_next.py index 57cbd1e4a6018..0ab9afea9ac69 100644 --- a/vllm/model_executor/models/llava_next.py +++ b/vllm/model_executor/models/llava_next.py @@ -108,15 +108,6 @@ def _image_pixel_processor( @MULTIMODAL_REGISTRY.register_image_pixel_input(_image_pixel_processor) @MULTIMODAL_REGISTRY.register_dummy_data(_get_dummy_image_data) class LlavaNextForConditionalGeneration(VisionLanguageModelBase): - """ - Args to `forward()`: - input_ids: Flattened (concatenated) input_ids corresponding to a - batch. - pixel_values: For PIXEL_VALUES, expects a batch with shape - [1, num_patches, 3, 336, 336]. - image_features: For IMAGE_FEATURES, expects a batch with shape - [1, num_patches, 1176, 1024]. - """ def __init__(self, config: LlavaNextConfig, @@ -355,7 +346,7 @@ def forward( attn_metadata: AttentionMetadata, **kwargs: object, ) -> SamplerOutput: - """Run forward pass for Llava 1.5. + """Run forward pass for LLaVA-NeXT. One key thing to understand is the `input_ids` already accounts for the positions of the to-be-inserted image embeddings. @@ -375,22 +366,19 @@ def forward( This way, the `positions` and `attn_metadata` are consistent with the `input_ids`. - The model takes two types of image inputs: - PIXEL_VALUES and IMAGE_FEATURES. - The following shows how each maps to huggingface implementation. - PIXEL_VALUES: - - https://github.com/huggingface/transformers/blob/07bdbeb/src/transformers/models/llava/modeling_llava.py#L353 - IMAGE_FEATURES: - - https://github.com/huggingface/transformers/blob/07bdbeb/src/transformers/models/llava/modeling_llava.py#L430 - before going through the multi modal projector. - Args: input_ids: Flattened (concatenated) input_ids corresponding to a batch. - pixel_values: For PIXEL_VALUES, expects a batch with shape - [1, 3, 336, 336]. - image_features: For IMAGE_FEATURES, expects a batch with shape - [1, 576, 1024]. + pixel_values: The pixels in each grid patch for each input image. + Expects a batch with shape `[1, num_patches, 3, 336, 336]`. + image_sizes: The original `(width, height)` for each input image. + Expects a batch with shape `[1, 2]`.
+ + See also: + Each input maps to huggingface implementation, as follows: + + - `pixel_values`: https://github.com/huggingface/transformers/blob/v4.41.1/src/transformers/models/llava_next/modeling_llava_next.py#L690 + - `image_sizes`: https://github.com/huggingface/transformers/blob/v4.41.1/src/transformers/models/llava_next/modeling_llava_next.py#L691 """ image_input = self._parse_and_validate_image_input(**kwargs) diff --git a/vllm/usage/usage_lib.py b/vllm/usage/usage_lib.py index 40a954a29493e..afb3007a528b4 100644 --- a/vllm/usage/usage_lib.py +++ b/vllm/usage/usage_lib.py @@ -16,6 +16,7 @@ import torch import vllm.envs as envs +from vllm.version import __version__ as VLLM_VERSION _config_home = envs.VLLM_CONFIG_ROOT _USAGE_STATS_JSON_PATH = os.path.join(_config_home, "vllm/usage_stats.json") @@ -163,9 +164,8 @@ def _report_usage_once(self, model_architecture: str, ]) # vLLM information - import vllm # delayed import to prevent circular import self.context = usage_context.value - self.vllm_version = vllm.__version__ + self.vllm_version = VLLM_VERSION self.model_architecture = model_architecture # Metadata diff --git a/vllm/utils.py b/vllm/utils.py index 56dc922aaaeb8..ef0602987a9e3 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -146,6 +146,15 @@ def is_neuron() -> bool: return transformers_neuronx is not None +@lru_cache(maxsize=None) +def is_tpu() -> bool: + try: + import libtpu + except ImportError: + libtpu = None + return libtpu is not None + + @lru_cache(maxsize=None) def get_max_shared_memory_bytes(gpu: int = 0) -> int: """Returns the maximum shared memory per thread block in bytes.""" @@ -546,6 +555,11 @@ def maybe_expand_dim(tensor: torch.Tensor, return tensor +def get_dtype_size(dtype: torch.dtype) -> int: + """Get the size of the data type in bytes.""" + return torch.tensor([], dtype=dtype).element_size() + + def merge_dicts(dict1: Dict[Any, List[Any]], dict2: Dict[Any, List[Any]]) -> Dict[Any, List[Any]]: """Merge 2 dicts that have key -> List of items. @@ -679,3 +693,38 @@ def inner(*args, **kwargs): return inner # type: ignore return wrapper + + +@lru_cache(maxsize=8) +def _cuda_device_count_stateless( + cuda_visible_devices: Optional[str] = None) -> int: + # Note: cuda_visible_devices is not used, but we keep it as an argument for + # LRU Cache purposes. + + # Code below is based on + # https://github.com/pytorch/pytorch/blob/ + # c1cd946818442aca8c7f812b16d187ce1586c3bc/ + # torch/cuda/__init__.py#L831C1-L831C17 + import torch.cuda + import torch.version + + if not torch.cuda._is_compiled(): + return 0 + # bypass _device_count_nvml() if rocm (not supported) + nvml_count = -1 if torch.version.hip else torch.cuda._device_count_nvml() + r = torch._C._cuda_getDeviceCount() if nvml_count < 0 else nvml_count + return r + + +def cuda_device_count_stateless() -> int: + """Get number of CUDA devices, caching based on the value of + CUDA_VISIBLE_DEVICES at the time of call. + + This should be used instead of torch.cuda.device_count() + unless CUDA_VISIBLE_DEVICES has already been set to the desired + value.""" + + # This can be removed and simply replaced with torch.cuda.get_device_count + # after https://github.com/pytorch/pytorch/pull/122815 is released. 
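The `cuda_device_count_stateless` helper above keys its `lru_cache` on the current value of `CUDA_VISIBLE_DEVICES`, so later changes to that variable are picked up. A rough sketch of the intended behavior, assuming a machine with at least two GPUs and that `vllm.envs` re-reads the variable on each access:

    import os
    from vllm.utils import cuda_device_count_stateless

    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    assert cuda_device_count_stateless() == 1

    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
    # The changed env var becomes part of the cache key, so the count is
    # recomputed; a cached torch.cuda.device_count() could be stale here.
    assert cuda_device_count_stateless() == 2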
+ + return _cuda_device_count_stateless(envs.CUDA_VISIBLE_DEVICES) diff --git a/vllm/version.py b/vllm/version.py new file mode 100644 index 0000000000000..db951e35dac97 --- /dev/null +++ b/vllm/version.py @@ -0,0 +1,2 @@ +# UPSTREAM SYNC: take downstream +__version__ = "0.5.0" diff --git a/vllm/worker/cache_engine.py b/vllm/worker/cache_engine.py index 2f0e59f7ae7c9..341b177d4af2a 100644 --- a/vllm/worker/cache_engine.py +++ b/vllm/worker/cache_engine.py @@ -6,7 +6,8 @@ from vllm.attention import get_attn_backend from vllm.config import CacheConfig, ModelConfig, ParallelConfig from vllm.logger import init_logger -from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE, is_pin_memory_available +from vllm.utils import (STR_DTYPE_TO_TORCH_DTYPE, get_dtype_size, + is_pin_memory_available) logger = init_logger(__name__) @@ -108,9 +109,5 @@ def get_cache_block_size( dtype = model_config.dtype else: dtype = STR_DTYPE_TO_TORCH_DTYPE[cache_config.cache_dtype] - dtype_size = _get_dtype_size(dtype) + dtype_size = get_dtype_size(dtype) return dtype_size * total - - -def _get_dtype_size(dtype: torch.dtype) -> int: - return torch.tensor([], dtype=dtype).element_size() diff --git a/vllm/worker/cpu_model_runner.py b/vllm/worker/cpu_model_runner.py index eaf43247d4fc5..d539f56937be1 100644 --- a/vllm/worker/cpu_model_runner.py +++ b/vllm/worker/cpu_model_runner.py @@ -343,8 +343,8 @@ def execute_model( "kv_caches": kv_caches, "attn_metadata": attn_metadata, } - if self.vision_language_config: - execute_model_kwargs.update({"image_input": multi_modal_input}) + if self.vision_language_config and multi_modal_input is not None: + execute_model_kwargs.update(multi_modal_input) hidden_states = model_executable(**execute_model_kwargs) diff --git a/vllm/worker/model_runner.py b/vllm/worker/model_runner.py index 99b12293a0244..476e9ba3bb463 100644 --- a/vllm/worker/model_runner.py +++ b/vllm/worker/model_runner.py @@ -13,13 +13,14 @@ ModelConfig, ParallelConfig, SchedulerConfig, VisionLanguageConfig) from vllm.distributed import broadcast_tensor_dict -from vllm.distributed.communication_op import graph_capture +from vllm.distributed.parallel_state import graph_capture from vllm.logger import init_logger from vllm.lora.layers import LoRAMapping from vllm.lora.request import LoRARequest from vllm.lora.worker_manager import LRUCacheWorkerLoRAManager from vllm.model_executor import SamplingMetadata from vllm.model_executor.model_loader import get_model +from vllm.model_executor.model_loader.tensorizer import TensorizerConfig from vllm.multimodal import MULTIMODAL_REGISTRY from vllm.sampling_params import SamplingParams from vllm.sequence import SamplerOutput, SequenceData, SequenceGroupMetadata @@ -222,6 +223,16 @@ def save_sharded_state( max_size=max_size, ) + def save_tensorized_model( + self, + tensorizer_config: TensorizerConfig, + ) -> None: + from vllm.model_executor.model_loader.loader import TensorizerLoader + TensorizerLoader.save_model( + self.model, + tensorizer_config=tensorizer_config, + ) + def get_max_block_per_batch(self) -> int: block_size = self.block_size return (self.max_seq_len_to_capture + block_size - 1) // block_size diff --git a/vllm/worker/tpu_model_runner.py b/vllm/worker/tpu_model_runner.py new file mode 100644 index 0000000000000..5003d3b0ca440 --- /dev/null +++ b/vllm/worker/tpu_model_runner.py @@ -0,0 +1,525 @@ +import time +from typing import List, Optional, Tuple + +import numpy as np +import torch +import torch.nn as nn +import torch_xla.core.xla_model as xm + +from vllm.attention import 
AttentionMetadata, get_attn_backend +from vllm.config import (CacheConfig, DeviceConfig, LoadConfig, ModelConfig, + ParallelConfig, SchedulerConfig, VisionLanguageConfig) +from vllm.logger import init_logger +from vllm.model_executor.model_loader import get_model +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.sequence import (CompletionSequenceGroupOutput, Logprob, + SamplerOutput, SequenceGroupMetadata, + SequenceOutput) +from vllm.utils import make_tensor_with_pad + +logger = init_logger(__name__) + +_PAD_SLOT_ID = 0 # FIXME(woosuk) + + +class TPUModelRunner: + + def __init__( + self, + model_config: ModelConfig, + parallel_config: ParallelConfig, + scheduler_config: SchedulerConfig, + device_config: DeviceConfig, + cache_config: CacheConfig, + load_config: LoadConfig, + vision_language_config: Optional[VisionLanguageConfig] = None, + ): + self.model_config = model_config + self.parallel_config = parallel_config + self.scheduler_config = scheduler_config + self.device_config = device_config + self.cache_config = cache_config + self.load_config = load_config + self.vision_language_config = vision_language_config + + self.block_size = self.cache_config.block_size + self.max_num_blocks_per_seq = (self.model_config.max_model_len // + self.block_size) + self.block_tables = np.zeros( + (self.scheduler_config.max_num_seqs, self.max_num_blocks_per_seq), + dtype=np.int32) + self.attn_backend = get_attn_backend( + self.model_config.get_num_attention_heads(self.parallel_config), + self.model_config.get_head_size(), + self.model_config.get_num_kv_heads(self.parallel_config), + self.model_config.get_sliding_window(), + self.model_config.dtype, + self.cache_config.cache_dtype, + self.block_size, + False, + ) + + def load_model(self) -> None: + self.device = self.device_config.device + + model = get_model( + model_config=self.model_config, + load_config=self.load_config, + device_config=self.device_config, + parallel_config=self.parallel_config, + cache_config=self.cache_config, + scheduler_config=self.scheduler_config, + vision_language_config=self.vision_language_config, + lora_config=None, + ) + xm.wait_device_ops() + + model = ModelWrapper(model) + self.model = torch.compile(model, backend="openxla", fullgraph=True) + + def _dummy_run( + self, + batch_size: int, + seq_len: int, + kv_caches: List[Tuple[torch.Tensor, torch.Tensor]], + is_prompt: bool, + ) -> None: + if is_prompt: + seq_len = (seq_len + 15) // 16 * 16 + token_ids = torch.zeros((batch_size, seq_len), + dtype=torch.int32, + device=self.device) + position_ids = torch.zeros((batch_size, seq_len), + dtype=torch.int32, + device=self.device) + slot_mapping = torch.zeros((batch_size, seq_len), + dtype=torch.int64, + device=self.device) + attn_metadata = self.attn_backend.make_metadata( + num_prefills=batch_size, + num_prefill_tokens=batch_size * seq_len, + num_decode_tokens=0, + slot_mapping=slot_mapping, + block_tables=None, + context_lens=None, + ) + input_lens = torch.ones((batch_size, ), + dtype=torch.int32, + device=self.device) + else: + assert seq_len == 1 + token_ids = torch.zeros((batch_size, seq_len), + dtype=torch.int32, + device=self.device) + position_ids = torch.zeros((batch_size, seq_len), + dtype=torch.int32, + device=self.device) + slot_mapping = torch.zeros((batch_size, seq_len), + dtype=torch.int64, + device=self.device) + block_tables = torch.zeros( + (batch_size, self.max_num_blocks_per_seq), + dtype=torch.int32, + device=self.device) + context_lens = torch.ones((batch_size, ), + 
dtype=torch.int32, + device=self.device) + input_lens = torch.ones((batch_size, ), + dtype=torch.int32, + device=self.device) + attn_metadata = self.attn_backend.make_metadata( + num_prefills=0, + num_prefill_tokens=0, + num_decode_tokens=batch_size * seq_len, + slot_mapping=slot_mapping, + block_tables=block_tables, + context_lens=context_lens, + ) + t = torch.ones((batch_size, ), dtype=torch.float32, device=self.device) + p = torch.ones((batch_size, ), dtype=torch.float32, device=self.device) + + # Dummy run. + self.model(token_ids, position_ids, kv_caches, attn_metadata, + input_lens, t, p) + + def warmup_model( + self, + kv_caches: List[Tuple[torch.Tensor, torch.Tensor]], + ) -> None: + # Prefill + logger.info("Compiling the model with different input shapes...") + start = time.time() + for batch_size in [1]: + seq_len = 16 + while True: + self._dummy_run(batch_size, seq_len, kv_caches, is_prompt=True) + xm.wait_device_ops() + logger.info("batch_size: %d, seq_len: %d", batch_size, seq_len) + + if seq_len >= self.model_config.max_model_len: + break + num_tokens = batch_size * seq_len + if num_tokens >= self.scheduler_config.max_num_batched_tokens: + break + seq_len = seq_len * 2 + + end = time.time() + logger.info("Compilation for prefill done in %.2f s.", end - start) + + # Decode + start = time.time() + seq_len = 1 + batch_size = 1 + while True: + self._dummy_run(batch_size, seq_len, kv_caches, is_prompt=False) + xm.wait_device_ops() + logger.info("batch_size: %d, seq_len: %d", batch_size, seq_len) + + if batch_size >= self.scheduler_config.max_num_seqs: + break + batch_size = batch_size + 16 if batch_size >= 16 else batch_size * 2 + + end = time.time() + logger.info("Compilation for decode done in %.2f s.", end - start) + + def _prepare_prompt( + self, + seq_group_metadata_list: List[SequenceGroupMetadata], + ): + assert len(seq_group_metadata_list) > 0 + input_tokens: List[List[int]] = [] + input_positions: List[List[int]] = [] + prompt_lens: List[int] = [] + slot_mapping: List[List[int]] = [] + + for seq_group_metadata in seq_group_metadata_list: + assert seq_group_metadata.is_prompt + seq_ids = list(seq_group_metadata.seq_data.keys()) + assert len(seq_ids) == 1 + seq_id = seq_ids[0] + + seq_data = seq_group_metadata.seq_data[seq_id] + # Could include output tokens when a request is preempted. + prompt_tokens = seq_data.get_token_ids() + prompt_len = len(prompt_tokens) + prompt_lens.append(prompt_len) + + input_tokens.append(prompt_tokens) + input_positions.append(list(range(prompt_len))) + + assert seq_group_metadata.block_tables is not None + block_table = seq_group_metadata.block_tables[seq_id] + slot_mapping.append([]) + for i in range(prompt_len): + block_number = block_table[i // self.block_size] + block_offset = i % self.block_size + slot = block_number * self.block_size + block_offset + slot_mapping[-1].append(slot) + + assert len(prompt_lens) > 0 + num_prefills = len(prompt_lens) + num_prefill_tokens = sum(prompt_lens) + + # Add paddings to make the shape [batch_size, max_prompt_len] where + # max_prompt_len is smallest power of 2 that is greater than or equal + # to the maximum prompt length. + # We need the 2D input shape because the Pallas FlashAttention kernel + # does not support packed 1D inputs. + # We pad the seq_len to powers of 2 to reduce the compilation overhead. 
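The padding comment above refers to `_get_padded_prefill_len`, defined near the end of this new file. A quick, self-contained illustration of that rounding (the helper is reproduced from later in the patch):

    def _get_padded_prefill_len(x: int) -> int:
        # Prompts up to 16 tokens pad to 16 (the Pallas kernel needs a multiple
        # of 16); longer prompts pad to the next power of two to bound the
        # number of compiled shapes.
        if x <= 16:
            return 16
        return 1 << (x - 1).bit_length()

    assert _get_padded_prefill_len(7) == 16
    assert _get_padded_prefill_len(17) == 32
    assert _get_padded_prefill_len(1000) == 1024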
+ max_prompt_len = _get_padded_prefill_len(max(prompt_lens)) + input_tokens = make_tensor_with_pad(input_tokens, + max_prompt_len, + pad=0, + dtype=torch.int32, + device=self.device) + input_positions = make_tensor_with_pad(input_positions, + max_prompt_len, + pad=0, + dtype=torch.int32, + device=self.device) + slot_mapping = make_tensor_with_pad(slot_mapping, + max_prompt_len, + pad=_PAD_SLOT_ID, + dtype=torch.int64, + device=self.device) + prompt_lens = torch.tensor(prompt_lens, + dtype=torch.int32, + device=self.device) + attn_metadata = self.attn_backend.make_metadata( + num_prefills=num_prefills, + num_prefill_tokens=num_prefill_tokens, # NOTE: This is not used. + num_decode_tokens=0, + slot_mapping=slot_mapping, + block_tables=None, + context_lens=None, + ) + return input_tokens, input_positions, attn_metadata, prompt_lens + + def _prepare_decode( + self, + seq_group_metadata_list: List[SequenceGroupMetadata], + ): + assert len(seq_group_metadata_list) > 0 + input_tokens: List[List[int]] = [] + input_positions: List[List[int]] = [] + slot_mapping: List[List[int]] = [] + context_lens: List[int] = [] + num_seq_groups = len(seq_group_metadata_list) + batch_size = _get_padded_batch_size(num_seq_groups) + + for i, seq_group_metadata in enumerate(seq_group_metadata_list): + assert not seq_group_metadata.is_prompt + + seq_ids = list(seq_group_metadata.seq_data.keys()) + + for seq_id in seq_ids: + seq_data = seq_group_metadata.seq_data[seq_id] + generation_token = seq_data.get_last_token_id() + input_tokens.append([generation_token]) + + seq_len = seq_data.get_len() + position = seq_len - 1 + input_positions.append([position]) + context_lens.append(seq_len) + + assert seq_group_metadata.block_tables is not None + block_table = seq_group_metadata.block_tables[seq_id] + self.block_tables[i, :len(block_table)] = block_table + + block_number = block_table[position // self.block_size] + block_offset = position % self.block_size + slot = block_number * self.block_size + block_offset + slot_mapping.append([slot]) + + num_paddings = batch_size - num_seq_groups + input_tokens = input_tokens + [[0]] * num_paddings + input_positions = input_positions + [[0]] * num_paddings + slot_mapping = slot_mapping + [[_PAD_SLOT_ID]] * num_paddings + context_lens = context_lens + [0] * num_paddings + + input_tokens = torch.tensor(input_tokens, + dtype=torch.int32, + device=self.device) + input_positions = torch.tensor(input_positions, + dtype=torch.int32, + device=self.device) + slot_mapping = torch.tensor(slot_mapping, + dtype=torch.int64, + device=self.device) + context_lens = torch.tensor(context_lens, + dtype=torch.int32, + device=self.device) + block_tables = torch.tensor(self.block_tables[:batch_size], + dtype=torch.int32, + device=self.device) + input_lens = torch.tensor([1] * batch_size, + dtype=torch.int32, + device=self.device) + attn_metadata = self.attn_backend.make_metadata( + num_prefills=0, + num_prefill_tokens=0, + num_decode_tokens=batch_size, + slot_mapping=slot_mapping, + block_tables=block_tables, + context_lens=context_lens, + ) + return input_tokens, input_positions, attn_metadata, input_lens + + def _prepare_sample( + self, + seq_group_metadata_list: List[SequenceGroupMetadata], + padded_batch_size: int, + ) -> Tuple[torch.Tensor, torch.Tensor]: + assert len(seq_group_metadata_list) > 0 + t = [] + p = [] + for seq_group_metadata in seq_group_metadata_list: + assert seq_group_metadata.sampling_params is not None + sampling_params = seq_group_metadata.sampling_params + + 
t.append(sampling_params.temperature + if sampling_params.temperature >= 1e-5 else 1e-5) + p.append(sampling_params.top_p) + num_paddings = padded_batch_size - len(seq_group_metadata_list) + t += [1.0] * num_paddings + p += [1.0] * num_paddings + + t = torch.tensor(t, dtype=torch.float32, device=self.device) + p = torch.tensor(p, dtype=torch.float32, device=self.device) + return t, p + + def prepare_inputs( + self, + seq_group_metadata_list: Optional[List[SequenceGroupMetadata]], + ): + assert seq_group_metadata_list is not None + assert len(seq_group_metadata_list) > 0 + # NOTE: We assume that all sequences in the group are all prompts or + # all decodes. + if seq_group_metadata_list[0].is_prompt: + inputs = self._prepare_prompt(seq_group_metadata_list) + else: + inputs = self._prepare_decode(seq_group_metadata_list) + padded_batch_size = inputs[0].shape[0] + sample_inputs = self._prepare_sample(seq_group_metadata_list, + padded_batch_size) + return inputs + sample_inputs + + def _execute_model( + self, + seq_group_metadata_list: List[SequenceGroupMetadata], + kv_caches: List[Tuple[torch.Tensor, torch.Tensor]], + ) -> List[CompletionSequenceGroupOutput]: + inputs = self.prepare_inputs(seq_group_metadata_list) + next_token_ids = self.model(inputs[0], inputs[1], kv_caches, + *inputs[2:]) + next_token_ids = next_token_ids.cpu().tolist() + + i = 0 + sampler_outputs = [] + for seq_group_metadata in seq_group_metadata_list: + seq_outputs = [] + seq_ids = list(seq_group_metadata.seq_data.keys()) + for seq_id in seq_ids: + next_token_id = next_token_ids[i] + seq_outputs.append( + SequenceOutput(seq_id, next_token_id, + {next_token_id: Logprob(0.0)})) + i += 1 + sampler_outputs.append( + CompletionSequenceGroupOutput(seq_outputs, None)) + return sampler_outputs + + def execute_model( + self, + seq_group_metadata_list: Optional[List[SequenceGroupMetadata]], + kv_caches: List[Tuple[torch.Tensor, torch.Tensor]], + ) -> SamplerOutput: + assert seq_group_metadata_list is not None + if seq_group_metadata_list[0].is_prompt: + # NOTE(woosuk): To reduce the compilation time, we only compile the + # prefill inputs with batch size 1. Because the scheduler is not + # aware of this limitation, we need to handle batch size > 1 + # internally by calling the model multiple times and concatenating + # the outputs. + # FIXME(woosuk): This is a temporary hack to not change the existing + # scheduler. We need to fix this in the future. + sampler_outputs = [] + for seq_group_metadata in seq_group_metadata_list: + sampler_outputs += self._execute_model([seq_group_metadata], + kv_caches) + else: + sampler_outputs = self._execute_model(seq_group_metadata_list, + kv_caches) + return SamplerOutput(sampler_outputs) + + +class ModelWrapper(nn.Module): + + def __init__(self, model: nn.Module): + super().__init__() + self.model = model.eval() + + def forward( + self, + token_ids: torch.Tensor, + position_ids: torch.Tensor, + kv_caches: List[Tuple[Optional[torch.Tensor], Optional[torch.Tensor]]], + attn_metadata: AttentionMetadata, + input_lens: torch.Tensor, + t: torch.Tensor, + p: torch.Tensor, + ) -> torch.Tensor: + """Executes the forward pass of the model and samples the next token. + + Args: + token_ids: The input token IDs of shape [batch_size, seq_len]. + position_ids: The input position IDs of shape [batch_size, seq_len]. + kv_caches: The key and value caches. They can be None during the + memory profiling at initialization. + attn_metadata: The Pallas attention metadata. 
+ input_lens: The actual input lengths of shape [batch_size]. + t: The sampling temperature of shape [batch_size]. + p: The top-p probability of shape [batch_size]. + """ + batch_size, seq_len = token_ids.shape + # Calculate the positions to sample from. + base_indicies = torch.arange( + batch_size, dtype=torch.int32, device=input_lens.device) * seq_len + logits_indices = base_indicies + input_lens - 1 + + # FIXME(woosuk): This is a temporary hack to avoid using the existing + # sampler and sampling metadata. + sampling_metadata = SamplingMetadata( + seq_groups=[], + selected_token_indices=logits_indices, + categorized_sample_indices={}, + num_prompts=attn_metadata.num_prefills, + ) + + # Skip this in memory profiling at initialization. + if kv_caches[0][0] is not None: + # index_copy_(slot_mapping) only works when the inserted dimension + # is 0. However, the KV cache in the Pallas backend has the shape + # [num_kv_heads, num_blocks, block_size, head_size]. To make it + # work, we need to flatten the first three dimensions and modify + # the slot_mapping accordingly. + num_kv_heads, num_blocks, block_size, _ = kv_caches[0][0].shape + slot_mapping = attn_metadata.slot_mapping + slot_mapping = slot_mapping.flatten() + head_indicies = torch.arange(0, + num_kv_heads, + device=slot_mapping.device, + dtype=slot_mapping.dtype) + head_indicies *= block_size * num_blocks + slot_mapping = slot_mapping.repeat_interleave(num_kv_heads).view( + -1, num_kv_heads) + slot_mapping = slot_mapping + head_indicies.view(1, -1) + slot_mapping = slot_mapping.flatten() + attn_metadata.slot_mapping = slot_mapping + + hidden_states = self.model( + token_ids, + position_ids, + kv_caches, + attn_metadata, + ) + hidden_states = hidden_states.flatten(0, 1) + logits = self.model.compute_logits(hidden_states, sampling_metadata) + + logits = logits / t.unsqueeze(dim=1) + # FIXME(woosuk): Disabled top-p sampling since it's too slow. + # logits = _apply_top_p(logits, p.unsqueeze(dim=1)) + probs = torch.softmax(logits, dim=-1, dtype=torch.float32) + # FIXME(woosuk): best_of > 1 is not supported. + next_token_ids = torch.multinomial(probs, num_samples=1).squeeze(dim=1) + return next_token_ids + + +def _get_padded_prefill_len(x: int) -> int: + # NOTE(woosuk): The pallas FlashAttention kernel requires the sequence + # length to be a multiple of 16. We pad the prompt length to the nearest + # multiple of 16. This is also good for performance. 
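Stepping back to the sampling-position arithmetic near the top of `ModelWrapper.forward` above: the hidden states are flattened to `[batch_size * seq_len, ...]`, so the index of each sequence's last real token is computed by hand. A worked example with assumed shapes:

    import torch

    batch_size, seq_len = 2, 8            # padded prefill shapes (assumed)
    input_lens = torch.tensor([5, 3])     # actual prompt lengths
    base_indices = torch.arange(batch_size) * seq_len
    logits_indices = base_indices + input_lens - 1
    print(logits_indices)                 # tensor([ 4, 10])
    # Row 0 samples from flattened position 4 (its 5th token); row 1 from
    # position 10 (= 8 + 3 - 1), skipping the padding tokens.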
+ if x <= 16: + return 16 + return 1 << (x - 1).bit_length() + + +def _get_padded_batch_size(batch_size: int) -> int: + if batch_size <= 2: + return batch_size + elif batch_size <= 4: + return 4 + elif batch_size <= 8: + return 8 + else: + return ((batch_size + 15) // 16) * 16 + + +def _apply_top_p(logits: torch.Tensor, p: torch.Tensor) -> torch.Tensor: + logits_sorted = torch.sort(logits, dim=-1, descending=True).values + sorted_cum_probs = torch.cumsum(logits_sorted.softmax(dim=-1), dim=-1) + cutoff_index = torch.sum(sorted_cum_probs < p, dim=-1, keepdim=True) + cutoff_logit = torch.gather(logits_sorted, -1, cutoff_index) + logits = logits.masked_fill_(logits < cutoff_logit, -float("inf")) + return logits diff --git a/vllm/worker/tpu_worker.py b/vllm/worker/tpu_worker.py new file mode 100644 index 0000000000000..04576015dadbd --- /dev/null +++ b/vllm/worker/tpu_worker.py @@ -0,0 +1,198 @@ +import os +from typing import List, Optional, Tuple + +import torch +import torch_xla.core.xla_model as xm +import torch_xla.runtime as xr + +import vllm.envs as envs +from vllm.config import (CacheConfig, DeviceConfig, LoadConfig, ModelConfig, + ParallelConfig, SchedulerConfig, VisionLanguageConfig) +from vllm.distributed import (ensure_model_parallel_initialized, + init_distributed_environment) +from vllm.logger import init_logger +from vllm.model_executor import set_random_seed +from vllm.sequence import ExecuteModelRequest, SamplerOutput +from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE, get_dtype_size +from vllm.worker.tpu_model_runner import TPUModelRunner +from vllm.worker.worker_base import LoraNotSupportedWorkerBase + +logger = init_logger(__name__) + + +class TPUWorker(LoraNotSupportedWorkerBase): + + def __init__( + self, + model_config: ModelConfig, + parallel_config: ParallelConfig, + scheduler_config: SchedulerConfig, + device_config: DeviceConfig, + cache_config: CacheConfig, + load_config: LoadConfig, + vision_language_config: Optional[VisionLanguageConfig], + local_rank: int, + rank: int, + distributed_init_method: str, + ) -> None: + self.model_config = model_config + self.parallel_config = parallel_config + self.scheduler_config = scheduler_config + self.device_config = device_config + self.cache_config = cache_config + self.load_config = load_config + self.vision_language_config = vision_language_config + self.local_rank = local_rank + self.rank = rank + self.distributed_init_method = distributed_init_method + + assert self.device_config.device_type == "tpu" + if self.cache_config.cache_dtype == "auto": + self.cache_dtype = self.model_config.dtype + else: + self.cache_dtype = STR_DTYPE_TO_TORCH_DTYPE[ + self.cache_config.cache_dtype] + + self.model_runner = TPUModelRunner(model_config, parallel_config, + scheduler_config, device_config, + cache_config, load_config, + vision_language_config) + + def init_device(self) -> None: + os.environ["PJRT_DEVICE"] = "TPU" + self.device = xm.xla_device() + self.device_config.device = self.device + torch.set_grad_enabled(False) + torch.set_default_dtype(self.model_config.dtype) + + # NOTE(woosuk): This is just a hack to initialize the TP group. + # This cannot perform the actual communication ops. + init_distributed_environment( + world_size=self.parallel_config.world_size, + rank=self.rank, + local_rank=self.local_rank, + distributed_init_method=self.distributed_init_method, + backend="gloo", + ) + ensure_model_parallel_initialized( + self.parallel_config.tensor_parallel_size, + self.parallel_config.pipeline_parallel_size) + + # Set random seed. 
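Back in `tpu_model_runner.py`, the decode path uses the companion helper `_get_padded_batch_size` added just above to snap batch sizes to a small set of compiled shapes. A short illustration (helper lightly condensed here so the snippet stands alone):

    def _get_padded_batch_size(batch_size: int) -> int:
        # Small batches snap to 2/4/8; larger ones round up to a multiple of 16.
        if batch_size <= 2:
            return batch_size
        elif batch_size <= 4:
            return 4
        elif batch_size <= 8:
            return 8
        return ((batch_size + 15) // 16) * 16

    assert [_get_padded_batch_size(b) for b in (1, 3, 9, 33)] == [1, 4, 16, 48]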
+        set_random_seed(self.model_config.seed)
+        xm.set_rng_state(self.model_config.seed, self.device)
+
+        # Increase the cache size limit, which is the maximum number of
+        # dynamo graphs that can be compiled.
+        # NOTE(woosuk): Usually, we compile 10-15 graphs for prefill and
+        # 30-40 graphs for decode. 128 is an arbitrary safe number.
+        torch._dynamo.config.cache_size_limit = 128
+        # Use persistent cache to avoid XLA recompilation.
+        # NOTE(woosuk): This does not completely eliminate the recompilation
+        # overhead because dynamo does not cache the compiled results.
+        xr.initialize_cache(os.path.expanduser(envs.VLLM_XLA_CACHE_PATH),
+                            readonly=False)
+
+    def load_model(self):
+        self.model_runner.load_model()
+
+    def determine_num_available_blocks(self) -> Tuple[int, int]:
+        num_layers = self.model_config.get_num_layers(self.parallel_config)
+        head_size = self.model_config.get_head_size()
+        num_kv_heads = self.model_config.get_num_kv_heads(self.parallel_config)
+
+        kv_caches = [(None, None) for _ in range(num_layers)]
+        self.model_runner._dummy_run(
+            batch_size=1,
+            seq_len=self.scheduler_config.max_num_batched_tokens,
+            kv_caches=kv_caches,
+            is_prompt=True,
+        )
+        # Synchronize before measuring the memory usage.
+        xm.wait_device_ops()
+
+        m = xm.get_memory_info(self.device)
+        program_size = 1024 * 1024 * 1024  # 1GB
+        free_bytes = max(m["bytes_limit"] - m["bytes_used"] - program_size, 0)
+        kv_cache_bytes = int(free_bytes *
+                             self.cache_config.gpu_memory_utilization)
+        kv_cache_dtype_bytes = get_dtype_size(self.cache_dtype)
+        block_size = self.cache_config.block_size
+        # Each block holds `block_size` tokens of both keys and values
+        # (hence the factor of 2) for every layer.
+        num_tpu_blocks = (kv_cache_bytes //
+                          (kv_cache_dtype_bytes * block_size * num_layers * 2 *
+                           head_size * num_kv_heads))
+        num_tpu_blocks = (num_tpu_blocks // 8) * 8  # Round down to 8.
+        return num_tpu_blocks, 0
+
+    def initialize_cache(
+        self,
+        num_gpu_blocks: int,
+        num_cpu_blocks: int,
+    ) -> None:
+        self.cache_config.num_gpu_blocks = num_gpu_blocks
+        self.cache_config.num_cpu_blocks = num_cpu_blocks
+        self.block_size = self.cache_config.block_size
+
+        dtype = self.cache_dtype
+        num_layers = self.model_config.get_num_layers(self.parallel_config)
+        num_kv_heads = self.model_config.get_num_kv_heads(self.parallel_config)
+        head_size = self.model_config.get_head_size()
+
+        self.tpu_cache = []
+        tpu_cache_shape = self.model_runner.attn_backend.get_kv_cache_shape(
+            num_gpu_blocks, self.block_size, num_kv_heads, head_size)
+        for _ in range(num_layers):
+            key_cache = torch.zeros(tpu_cache_shape,
+                                    dtype=dtype,
+                                    device=self.device)
+            value_cache = torch.zeros_like(key_cache)
+            self.tpu_cache.append((key_cache, value_cache))
+        self._warmup_model()
+
+    def _warmup_model(self) -> None:
+        # FIXME(woosuk): Here we are abusing `enforce_eager`, which is defined
+        # for CUDA graphs. We should refactor this part.
+        if not self.model_config.enforce_eager:
+            # Warm up the model with all possible input shapes so that
+            # compilation never happens during the actual execution.
+            # This may take ~30 mins for the first run and ~20 mins for the
+            # subsequent runs.
+            # If `enforce_eager` is True, the ahead-of-time compilation is
+            # skipped and the compilation happens during the actual execution,
+            # which is bad for performance but useful for development.
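# A minimal sketch of what "all possible input shapes" could mean for the
# ahead-of-time warm-up described in the comments above. The real enumeration
# lives in TPUModelRunner.warmup_model (not part of this diff); the function
# name and the exact shape grid below are illustrative assumptions only.
from typing import List, Tuple

def _assumed_warmup_shapes(max_num_seqs: int,
                           max_seq_len: int) -> List[Tuple[int, int]]:
    # Decode: one token per sequence, batch sizes padded the same way as
    # _get_padded_batch_size above (1, 2, 4, 8, then multiples of 16).
    batch_sizes = [1, 2, 4, 8] + list(range(16, max_num_seqs + 1, 16))
    decode_shapes = [(bs, 1) for bs in batch_sizes]
    # Prefill: a single sequence, lengths padded to powers of two (>= 16),
    # mirroring the power-of-two padding helper at the top of this file.
    seq_len, prefill_shapes = 16, []
    while seq_len <= max_seq_len:
        prefill_shapes.append((1, seq_len))
        seq_len *= 2
    return decode_shapes + prefill_shapes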
+ self.model_runner.warmup_model(self.tpu_cache) + + def get_cache_block_size_bytes(self) -> int: + head_size = self.model_config.get_head_size() + num_heads = self.model_config.get_num_kv_heads(self.parallel_config) + num_layers = self.model_config.get_num_layers(self.parallel_config) + + key_cache_block = self.cache_config.block_size * num_heads * head_size + value_cache_block = key_cache_block + total = num_layers * (key_cache_block + value_cache_block) + dtype_size = get_dtype_size(self.cache_dtype) + return dtype_size * total + + def execute_model( + self, + execute_model_req: Optional[ExecuteModelRequest] = None + ) -> List[SamplerOutput]: + if execute_model_req is None: + return [] + + seq_group_metadata_list = execute_model_req.seq_group_metadata_list + num_seq_groups = len(seq_group_metadata_list) + if num_seq_groups == 0: + return [] + + # Currently, TPUWorker does not support swapping. + # TODO(woosuk): Support block copying. + assert len(execute_model_req.blocks_to_swap_in) == 0, ( + "Swapping is not supported for the TPU backend.") + assert len(execute_model_req.blocks_to_swap_out) == 0, ( + "Swapping is not supported for the TPU backend.") + assert len(execute_model_req.blocks_to_copy) == 0 + + output = self.model_runner.execute_model(seq_group_metadata_list, + self.tpu_cache) + return [output] diff --git a/vllm/worker/worker.py b/vllm/worker/worker.py index 10411a2bf7a10..7a378a862d0c0 100644 --- a/vllm/worker/worker.py +++ b/vllm/worker/worker.py @@ -15,6 +15,7 @@ set_custom_all_reduce) from vllm.lora.request import LoRARequest from vllm.model_executor import set_random_seed +from vllm.model_executor.model_loader.tensorizer import TensorizerConfig from vllm.sequence import ExecuteModelRequest, PoolerOutput, SamplerOutput from vllm.worker.cache_engine import CacheEngine from vllm.worker.embedding_model_runner import EmbeddingModelRunner @@ -132,6 +133,13 @@ def save_sharded_state( max_size=max_size, ) + def save_tensorized_model( + self, + tensorizer_config: TensorizerConfig, + ) -> None: + self.model_runner.save_tensorized_model( + tensorizer_config=tensorizer_config, ) + @torch.inference_mode() def determine_num_available_blocks(self) -> Tuple[int, int]: """Profiles the peak memory usage of the model to determine how many
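# Worked example of the TPU KV-cache sizing in determine_num_available_blocks
# and get_cache_block_size_bytes above. The model dimensions and the
# free-memory figure are assumed values for illustration, not measurements
# from a real TPU.
num_layers, num_kv_heads, head_size = 32, 32, 128  # assumed 7B-class model
dtype_bytes, block_size = 2, 16                    # bf16 cache, assumed block size

# Bytes per block: key + value (factor of 2) for every layer.
block_bytes = dtype_bytes * block_size * num_layers * 2 * head_size * num_kv_heads
assert block_bytes == 8 * 1024 * 1024              # 8 MiB per 16-token block

free_bytes = 16 * 1024**3                # assumed free HBM after the 1 GiB program reserve
kv_cache_bytes = int(free_bytes * 0.90)  # gpu_memory_utilization = 0.90
num_tpu_blocks = (kv_cache_bytes // block_bytes) // 8 * 8
print(num_tpu_blocks, num_tpu_blocks * block_size)  # 1840 blocks ~= 29k cached tokens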