add gptq_marlin test for bug report https://github.com/vllm-project/vllm/issues/5088 (#5145)
parent e691918e3b
commit d919ecc771
@@ -40,6 +40,9 @@ MODELS = [
     ("TheBloke/TinyLlama-1.1B-Chat-v1.0-GPTQ", "gptq-8bit-128g-actorder_True"),
     # 8-bit, act_order==True, group_size=32
     ("TheBloke/TinyLlama-1.1B-Chat-v1.0-GPTQ", "gptq-8bit-32g-actorder_True"),
+
+    # 4-bit, act_order==True, group_size=128
+    ("TechxGenus/gemma-1.1-2b-it-GPTQ", "main")
 ]
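For context, below is a minimal sketch of how a MODELS list of (model id, revision) pairs like the one above is typically consumed as parametrized pytest cases, so the new gemma entry becomes one additional gptq_marlin test case. This is illustrative only and is not the actual vllm test_gptq_marlin.py; the real test loads each quantized model and checks its generated outputs, which this sketch does not do.

    # Sketch only: shows the parametrization pattern, not the real gptq_marlin check.
    import pytest

    MODELS = [
        # 8-bit, act_order==True, group_size=128
        ("TheBloke/TinyLlama-1.1B-Chat-v1.0-GPTQ", "gptq-8bit-128g-actorder_True"),
        # 8-bit, act_order==True, group_size=32
        ("TheBloke/TinyLlama-1.1B-Chat-v1.0-GPTQ", "gptq-8bit-32g-actorder_True"),

        # 4-bit, act_order==True, group_size=128 (entry added by this commit)
        ("TechxGenus/gemma-1.1-2b-it-GPTQ", "main"),
    ]

    @pytest.mark.parametrize("model", MODELS)
    def test_model_entry_is_well_formed(model):
        # Each tuple is a (Hugging Face model id, revision/branch) pair; a real
        # test would pass these to the model runner with the gptq_marlin backend.
        model_name, revision = model
        assert "/" in model_name
        assert revision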