[beam search] add output for manually checking the correctness (#8684)
parent 0455c46ed4
commit 0faab90eb0
@@ -11,7 +11,7 @@ import pytest
 # 3. Use the model "huggyllama/llama-7b".
 MAX_TOKENS = [128]
 BEAM_WIDTHS = [4]
-MODELS = ["facebook/opt-125m"]
+MODELS = ["TinyLlama/TinyLlama-1.1B-Chat-v1.0"]
 
 
 @pytest.mark.parametrize("model", MODELS)
@@ -37,8 +37,15 @@ def test_beam_search_single_input(
                                                        beam_width, max_tokens)
 
     for i in range(len(example_prompts)):
-        hf_output_ids, _ = hf_outputs[i]
-        vllm_output_ids, _ = vllm_outputs[i]
+        hf_output_ids, hf_output_texts = hf_outputs[i]
+        vllm_output_ids, vllm_output_texts = vllm_outputs[i]
+        for i, (hf_text,
+                vllm_text) in enumerate(zip(hf_output_texts,
+                                            vllm_output_texts)):
+            print(f">>>{i}-th hf output:")
+            print(hf_text)
+            print(f">>>{i}-th vllm output:")
+            print(vllm_text)
         assert len(hf_output_ids) == len(vllm_output_ids)
         for j in range(len(hf_output_ids)):
             assert hf_output_ids[j] == vllm_output_ids[j], (
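For reviewers who want to reproduce the manual check outside the test harness, below is a minimal standalone sketch of the comparison this commit instruments. It assumes the return shape used by vLLM's beam-search test runners (per prompt, a tuple of beam token-id lists and their decoded texts); the helper name compare_beam_outputs and the sample data are illustrative, not part of the vLLM test suite. The sketch uses a separate loop variable for the per-beam iteration so the prompt index stays available for the assertion message.

    # Standalone sketch of the per-prompt check performed in the
    # touched beam-search test (helper name and sample data are
    # illustrative assumptions).
    from typing import List, Tuple

    # Per prompt: (token-id sequences, decoded texts), one of each per beam.
    BeamOutputs = List[Tuple[List[List[int]], List[str]]]


    def compare_beam_outputs(hf_outputs: BeamOutputs,
                             vllm_outputs: BeamOutputs) -> None:
        for i, ((hf_ids, hf_texts),
                (vllm_ids, vllm_texts)) in enumerate(zip(hf_outputs,
                                                         vllm_outputs)):
            # Print both sides so a mismatch can be inspected by eye.
            for k, (hf_text, vllm_text) in enumerate(zip(hf_texts,
                                                         vllm_texts)):
                print(f">>>prompt {i}, beam {k} hf output:")
                print(hf_text)
                print(f">>>prompt {i}, beam {k} vllm output:")
                print(vllm_text)
            # The hard check still compares token ids beam by beam.
            assert len(hf_ids) == len(vllm_ids)
            for j in range(len(hf_ids)):
                assert hf_ids[j] == vllm_ids[j], (
                    f"prompt {i}, beam {j}:\n"
                    f"HF: {hf_ids[j]}\nvLLM: {vllm_ids[j]}")


    if __name__ == "__main__":
        # Tiny fabricated example: identical inputs pass after
        # printing the texts.
        sample = [([[1, 2, 3], [1, 2, 4]], ["a cat sat", "a cat ran"])]
        compare_beam_outputs(sample, sample)

Note that pytest captures stdout by default, so the prints added by this commit are only visible when the test is run with pytest -s, or in the captured-output section of a failing run.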