[
    {
        "test_name": "throughput_llama8B_tp1",
        "parameters": {
            "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
            "tensor_parallel_size": 1,
            "load_format": "dummy",
            "dataset": "./ShareGPT_V3_unfiltered_cleaned_split.json",
            "num_prompts": 200,
            "backend": "vllm"
        }
    },
    {
        "test_name": "throughput_llama70B_tp4",
        "parameters": {
            "model": "meta-llama/Meta-Llama-3.1-70B-Instruct",
            "tensor_parallel_size": 4,
            "load_format": "dummy",
            "dataset": "./ShareGPT_V3_unfiltered_cleaned_split.json",
            "num_prompts": 200,
            "backend": "vllm"
        }
    },
    {
        "test_name": "throughput_mixtral8x7B_tp2",
        "parameters": {
            "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
            "tensor_parallel_size": 2,
            "load_format": "dummy",
            "dataset": "./ShareGPT_V3_unfiltered_cleaned_split.json",
            "num_prompts": 200,
            "backend": "vllm"
        }
    }
]