| File | Latest commit | Commit date |
| --- | --- | --- |
| __init__.py | [Experimental] Add multi-LoRA support (#1804) | 2024-01-23 15:26:37 -08:00 |
| conftest.py | [Bugfix] Fix LoRA loading check (#4138) | 2024-04-19 00:59:54 -07:00 |
| test_baichuan.py | [Kernel] Add punica dimension for Baichuan-13B (#4053) | 2024-04-13 07:55:05 -07:00 |
| test_chatglm3.py | Enable more models to inference based on LoRA (#3382) | 2024-03-25 18:09:31 -07:00 |
| test_gemma.py | Add LoRA support for Gemma (#3050) | 2024-02-28 13:03:28 -08:00 |
| test_layer_variation.py | [CI] Try introducing isort. (#3495) | 2024-03-25 07:59:47 -07:00 |
| test_layers.py | [Kernel] Full Tensor Parallelism for LoRA Layers (#3524) | 2024-04-27 00:03:48 -07:00 |
| test_llama.py | [CI] Try introducing isort. (#3495) | 2024-03-25 07:59:47 -07:00 |
| test_lora_checkpoints.py | [Bugfix] Fix LoRA loading check (#4138) | 2024-04-19 00:59:54 -07:00 |
| test_lora_manager.py | [CI] Try introducing isort. (#3495) | 2024-03-25 07:59:47 -07:00 |
| test_lora.py | [Experimental] Add multi-LoRA support (#1804) | 2024-01-23 15:26:37 -08:00 |
| test_mixtral.py | Re-enable the 80 char line width limit (#3305) | 2024-03-10 19:49:14 -07:00 |
| test_punica.py | [Kernel] Full Tensor Parallelism for LoRA Layers (#3524) | 2024-04-27 00:03:48 -07:00 |
| test_quant_model.py | [Core] Support LoRA on quantized models (#4012) | 2024-04-11 21:02:44 -07:00 |
| test_tokenizer_group.py | [CI] Try introducing isort. (#3495) | 2024-03-25 07:59:47 -07:00 |
| test_utils.py | [CI] Try introducing isort. (#3495) | 2024-03-25 07:59:47 -07:00 |
| test_worker.py | [Core] Refactor model loading code (#4097) | 2024-04-16 11:34:39 -07:00 |
| utils.py | [Experimental] Add multi-LoRA support (#1804) | 2024-01-23 15:26:37 -08:00 |