
Commit

[CI] Nits for bad initialization of SeqGroup in testing (vllm-project…
robertgshaw2-neuralmagic authored May 10, 2024
1 parent 2e7796f commit fcc2994
Showing 2 changed files with 16 additions and 8 deletions.
13 changes: 9 additions & 4 deletions tests/core/test_block_manager.py
@@ -142,8 +142,10 @@ def test_append_slot_cow():
     child = prompt.fork(new_seq_id=2)
 
     # Allocate space for the sequence group.
-    seq_group = SequenceGroup("1", [prompt, child], SamplingParams(),
-                              time.time(), time.perf_counter)
+    seq_group = SequenceGroup(request_id="1",
+                              seqs=[prompt, child],
+                              arrival_time=time.time(),
+                              sampling_params=SamplingParams())
     block_manager.allocate(seq_group)
 
     # Fork and append a new token id. We expect a COW to be scheduled.
@@ -303,8 +305,11 @@ def test_sliding_window_multi_seq():
     assert block_manager.get_num_free_gpu_blocks() == num_gpu_blocks
 
     parent = Sequence(1, "one two three", [0, 1, 2], block_size)
-    seq_group = SequenceGroup("1", [parent], SamplingParams(), time.time(),
-                              None)
+    seq_group = SequenceGroup(request_id="1",
+                              seqs=[parent],
+                              arrival_time=time.time(),
+                              sampling_params=SamplingParams(),
+                              lora_request=None)
     block_manager.allocate(seq_group)
 
     # assert the number of blocks allocated is correct
11 changes: 7 additions & 4 deletions tests/core/utils.py
@@ -22,10 +22,13 @@ def create_dummy_prompt(
     prompt_tokens = list(range(prompt_length))
     prompt_str = " ".join([str(t) for t in prompt_tokens])
     prompt = Sequence(int(request_id), prompt_str, prompt_tokens, block_size)
-    seq_group = SequenceGroup(
-        request_id, [prompt],
-        SamplingParams(use_beam_search=use_beam_search, best_of=best_of),
-        time.time(), lora_request)
+    seq_group = SequenceGroup(request_id=request_id,
+                              seqs=[prompt],
+                              arrival_time=time.time(),
+                              sampling_params=SamplingParams(
+                                  use_beam_search=use_beam_search,
+                                  best_of=best_of),
+                              lora_request=lora_request)
 
     return prompt, seq_group
 
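For reference, the snippet below is a minimal, self-contained sketch of the keyword-argument initialization pattern the tests move to in this commit. The constructor fields (request_id, seqs, arrival_time, sampling_params, lora_request) and the Sequence call are taken directly from the diff; the import paths and block_size value are assumptions for illustration, not part of the change.

import time

# Assumed import paths based on the vLLM layout at the time of this commit.
from vllm import SamplingParams
from vllm.sequence import Sequence, SequenceGroup

block_size = 16  # illustrative value
prompt = Sequence(1, "one two three", [0, 1, 2], block_size)

# Keyword arguments make each value's destination explicit, so a reordered or
# extended SequenceGroup.__init__ cannot silently misassign test inputs (the
# old positional call in test_append_slot_cow passed time.perf_counter as the
# fifth argument, which is the kind of slip the commit title calls out).
seq_group = SequenceGroup(request_id="1",
                          seqs=[prompt],
                          arrival_time=time.time(),
                          sampling_params=SamplingParams(),
                          lora_request=None)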
