vllm/vllm/model_executor/models
File              Last commit                                         Date
__init__.py       Add support for aquila (#663)                       2023-08-22 00:13:36 -07:00
aquila.py         Enable safetensors loading for all models (#974)    2023-09-07 15:49:52 -07:00
baichuan.py       Enable safetensors loading for all models (#974)    2023-09-07 15:49:52 -07:00
bloom.py          Enable safetensors loading for all models (#974)    2023-09-07 15:49:52 -07:00
falcon.py         Enable safetensors loading for all models (#974)    2023-09-07 15:49:52 -07:00
gpt2.py           Enable safetensors loading for all models (#974)    2023-09-07 15:49:52 -07:00
gpt_bigcode.py    Enable safetensors loading for all models (#974)    2023-09-07 15:49:52 -07:00
gpt_j.py          Enable safetensors loading for all models (#974)    2023-09-07 15:49:52 -07:00
gpt_neox.py       Enable safetensors loading for all models (#974)    2023-09-07 15:49:52 -07:00
internlm.py       Enable safetensors loading for all models (#974)    2023-09-07 15:49:52 -07:00
llama.py          Enable safetensors loading for all models (#974)    2023-09-07 15:49:52 -07:00
mpt.py            Enable safetensors loading for all models (#974)    2023-09-07 15:49:52 -07:00
opt.py            Enable safetensors loading for all models (#974)    2023-09-07 15:49:52 -07:00
qwen.py           Enable safetensors loading for all models (#974)    2023-09-07 15:49:52 -07:00
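Every per-model file in this directory was touched by "Enable safetensors loading for all models (#974)". As a rough illustration of what safetensors loading means in general, the sketch below reads tensors by name from a .safetensors checkpoint and copies them into a PyTorch module using the safetensors library directly. It is not vLLM's weight-loading code; the file name, helper names, and the assumption that checkpoint keys match parameter names are all hypothetical.

# Minimal sketch (assumptions noted above), not vLLM's actual loader.
import torch
from safetensors import safe_open

def iterate_safetensors_weights(path: str):
    """Yield (name, tensor) pairs from a .safetensors file, one tensor at a time."""
    with safe_open(path, framework="pt", device="cpu") as f:
        for name in f.keys():
            yield name, f.get_tensor(name)

def load_into_model(model: torch.nn.Module, path: str) -> None:
    """Copy checkpoint tensors into parameters of `model` whose names match."""
    params = dict(model.named_parameters())
    for name, weight in iterate_safetensors_weights(path):
        if name in params:
            with torch.no_grad():
                params[name].copy_(weight)

# Hypothetical usage: load_into_model(my_model, "model.safetensors")

Compared with pickled .bin checkpoints, the safetensors format allows tensors to be read individually and safely (no arbitrary code execution on load), which is the general motivation for supporting it across all models.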