[ci][test] adjust max wait time for cpu offloading test (#7709)
parent: 6e4658c7aa
commit: 9e51b6a626
@@ -14,10 +14,12 @@ def test_cpu_offload_fp8():
     # Test quantization of an unquantized checkpoint
     compare_two_settings("meta-llama/Meta-Llama-3-8B-Instruct",
                          ["--quantization", "fp8"],
-                         ["--quantization", "fp8", "--cpu-offload-gb", "2"])
+                         ["--quantization", "fp8", "--cpu-offload-gb", "2"],
+                         max_wait_seconds=480)
     # Test loading a quantized checkpoint
     compare_two_settings("neuralmagic/Meta-Llama-3-8B-Instruct-FP8", [],
-                         ["--cpu-offload-gb", "2"])
+                         ["--cpu-offload-gb", "2"],
+                         max_wait_seconds=480)
 
 
 @pytest.mark.skipif(not is_quant_method_supported("gptq_marlin"),
@@ -25,11 +27,13 @@ def test_cpu_offload_fp8():
 def test_cpu_offload_gptq():
     # Test GPTQ Marlin
     compare_two_settings("Qwen/Qwen2-1.5B-Instruct-GPTQ-Int4", [],
-                         ["--cpu-offload-gb", "1"])
+                         ["--cpu-offload-gb", "1"],
+                         max_wait_seconds=480)
     # Test GPTQ
     compare_two_settings("Qwen/Qwen2-1.5B-Instruct-GPTQ-Int4",
                          ["--quantization", "gptq"],
-                         ["--quantization", "gptq", "--cpu-offload-gb", "1"])
+                         ["--quantization", "gptq", "--cpu-offload-gb", "1"],
+                         max_wait_seconds=480)
 
 
 @pytest.mark.skipif(not is_quant_method_supported("awq_marlin"),
@@ -37,11 +41,13 @@ def test_cpu_offload_gptq():
 def test_cpu_offload_awq():
     # Test AWQ Marlin
     compare_two_settings("Qwen/Qwen2-1.5B-Instruct-AWQ", [],
-                         ["--cpu-offload-gb", "1"])
+                         ["--cpu-offload-gb", "1"],
+                         max_wait_seconds=480)
     # Test AWQ
     compare_two_settings("Qwen/Qwen2-1.5B-Instruct-AWQ",
                          ["--quantization", "awq"],
-                         ["--quantization", "awq", "--cpu-offload-gb", "1"])
+                         ["--quantization", "awq", "--cpu-offload-gb", "1"],
+                         max_wait_seconds=480)
 
 
 @pytest.mark.skipif(not is_quant_method_supported("gptq_marlin"),
@@ -49,11 +55,14 @@ def test_cpu_offload_awq():
 def test_cpu_offload_compressed_tensors():
     # Test wNa16
     compare_two_settings("nm-testing/tinyllama-oneshot-w4a16-channel-v2", [],
-                         ["--cpu-offload-gb", "1"])
+                         ["--cpu-offload-gb", "1"],
+                         max_wait_seconds=480)
     # Test w4a16_marlin24
     compare_two_settings("nm-testing/llama7b-one-shot-2_4-w4a16-marlin24-t",
-                         [], ["--cpu-offload-gb", "1"])
+                         [], ["--cpu-offload-gb", "1"],
+                         max_wait_seconds=480)
     # Test w8a8
     compare_two_settings(
         "nm-testing/tinyllama-oneshot-w8w8-test-static-shape-change", [],
-        ["--cpu-offload-gb", "1"])
+        ["--cpu-offload-gb", "1"],
+        max_wait_seconds=480)
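
Net effect on the test side: every compare_two_settings call in the CPU-offloading tests now passes an explicit startup budget of 480 seconds instead of relying on the helper's hard-coded 240-second limit, presumably because loading and offloading weights can exceed four minutes on CI hardware. A minimal sketch of the resulting call shape (the import path tests.utils is inferred from the second file in this diff, not verified):

from tests.utils import compare_two_settings  # assumed location of the helper

# Outputs with and without offloading must match; the server now gets up to
# 480 s (rather than the previous hard-coded 240 s) to become healthy.
compare_two_settings("Qwen/Qwen2-1.5B-Instruct-AWQ",
                     [],                         # arg1: baseline GPU-only run
                     ["--cpu-offload-gb", "1"],  # arg2: offload 1 GiB to CPU
                     max_wait_seconds=480)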
@@ -56,16 +56,14 @@ VLLM_PATH = Path(__file__).parent.parent
 
 class RemoteOpenAIServer:
     DUMMY_API_KEY = "token-abc123"  # vLLM's OpenAI server does not need API key
-    MAX_START_WAIT_S = 240  # wait for server to start for 240 seconds
 
-    def __init__(
-        self,
-        model: str,
-        cli_args: List[str],
-        *,
-        env_dict: Optional[Dict[str, str]] = None,
-        auto_port: bool = True,
-    ) -> None:
+    def __init__(self,
+                 model: str,
+                 cli_args: List[str],
+                 *,
+                 env_dict: Optional[Dict[str, str]] = None,
+                 auto_port: bool = True,
+                 max_wait_seconds: Optional[float] = None) -> None:
         if auto_port:
             if "-p" in cli_args or "--port" in cli_args:
                 raise ValueError("You have manually specified the port"
@@ -90,8 +88,9 @@ class RemoteOpenAIServer:
                                      env=env,
                                      stdout=sys.stdout,
                                      stderr=sys.stderr)
+        max_wait_seconds = max_wait_seconds or 240
         self._wait_for_server(url=self.url_for("health"),
-                              timeout=self.MAX_START_WAIT_S)
+                              timeout=max_wait_seconds)
 
     def __enter__(self):
         return self
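
The max_wait_seconds = max_wait_seconds or 240 line is what keeps existing callers working: passing nothing (None) falls back to the value the removed MAX_START_WAIT_S constant used to provide. A self-contained sketch of this fallback pattern, with illustrative names that are not from the repository:

from typing import Optional

DEFAULT_START_WAIT_S = 240  # mirrors the removed MAX_START_WAIT_S constant

def resolve_timeout(max_wait_seconds: Optional[float]) -> float:
    # `or` treats None as "not set" and substitutes the default, exactly as
    # RemoteOpenAIServer.__init__ does before calling _wait_for_server().
    return max_wait_seconds or DEFAULT_START_WAIT_S

assert resolve_timeout(None) == 240
assert resolve_timeout(480) == 480

One caveat of `or` over an explicit `is None` check: a caller passing 0 would also get the 240-second default. That is harmless here, since a zero-second startup timeout is never useful.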
@@ -145,7 +144,8 @@ def compare_two_settings(model: str,
                          arg1: List[str],
                          arg2: List[str],
                          env1: Optional[Dict[str, str]] = None,
-                         env2: Optional[Dict[str, str]] = None):
+                         env2: Optional[Dict[str, str]] = None,
+                         max_wait_seconds: Optional[float] = None) -> None:
     """
     Launch API server with two different sets of arguments/environments
     and compare the results of the API calls.
@@ -164,7 +164,10 @@ def compare_two_settings(model: str,
     token_ids = tokenizer(prompt)["input_ids"]
     results = []
     for args, env in ((arg1, env1), (arg2, env2)):
-        with RemoteOpenAIServer(model, args, env_dict=env) as server:
+        with RemoteOpenAIServer(model,
+                                args,
+                                env_dict=env,
+                                max_wait_seconds=max_wait_seconds) as server:
             client = server.get_client()
 
             # test models list
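
Taken together, the new keyword threads from the tests through compare_two_settings into RemoteOpenAIServer and finally into the health-check wait. A condensed, runnable sketch of that flow using hypothetical stand-ins (FakeServer and compare_two_fake_settings are not real vLLM symbols; the real classes launch a vLLM API server subprocess):

from typing import List, Optional

class FakeServer:
    # Stand-in for RemoteOpenAIServer: resolves the timeout the same way.
    def __init__(self, model: str, cli_args: List[str], *,
                 max_wait_seconds: Optional[float] = None) -> None:
        max_wait_seconds = max_wait_seconds or 240  # same fallback as above
        print(f"waiting up to {max_wait_seconds}s for {model} to start")

def compare_two_fake_settings(model: str, arg1: List[str], arg2: List[str],
                              max_wait_seconds: Optional[float] = None) -> None:
    # Stand-in for compare_two_settings: forwards the timeout to both servers.
    for args in (arg1, arg2):
        FakeServer(model, args, max_wait_seconds=max_wait_seconds)

compare_two_fake_settings("demo-model", [], ["--cpu-offload-gb", "1"],
                          max_wait_seconds=480)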