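// This file holds the go:generate steps that build the llama.cpp "server"
// runners (CPU and CUDA variants of the ggml and gguf submodules) and
// rename each resulting binary to ollama-runner.exe for the Windows build.
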
package llm
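
// Initialize the vendored llama.cpp submodules.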
//go:generate git submodule init
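
// ggml runner (CPU): sync the ggml submodule and apply the local patches.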
//go:generate git submodule update --force ggml
//go:generate git -C ggml apply ../patches/0001-add-detokenize-endpoint.patch
//go:generate git -C ggml apply ../patches/0002-34B-model-support.patch
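// Configure and build the CPU server (k-quants enabled), then rename the
// binary to ollama-runner.exe.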
//go:generate cmake -S ggml -B ggml/build/cpu -DLLAMA_K_QUANTS=on
//go:generate cmake --build ggml/build/cpu --target server --config Release
//go:generate cmd /c move ggml\build\cpu\bin\Release\server.exe ggml\build\cpu\bin\Release\ollama-runner.exe
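
// gguf runner (CPU): sync the gguf submodule and apply the log-target patch.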
//go:generate git submodule update --force gguf
//go:generate git -C gguf apply ../patches/0001-update-default-log-target.patch
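// Build for baseline AVX only: AVX2, AVX-512, FMA and F16C are switched off,
// presumably so the runner works on older CPUs.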
//go:generate cmake -S gguf -B gguf/build/cpu -DLLAMA_K_QUANTS=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off
//go:generate cmake --build gguf/build/cpu --target server --config Release
//go:generate cmd /c move gguf\build\cpu\bin\Release\server.exe gguf\build\cpu\bin\Release\ollama-runner.exe
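
// ggml runner (CUDA): rebuild the ggml server with cuBLAS acceleration.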
//go:generate cmake -S ggml -B ggml/build/cuda -DLLAMA_CUBLAS=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on
//go:generate cmake --build ggml/build/cuda --target server --config Release
//go:generate cmd /c move ggml\build\cuda\bin\Release\server.exe ggml\build\cuda\bin\Release\ollama-runner.exe
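
// gguf runner (CUDA): cuBLAS build with the same baseline-AVX flags as the
// CPU variant.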
//go:generate cmake -S gguf -B gguf/build/cuda -DLLAMA_CUBLAS=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off
//go:generate cmake --build gguf/build/cuda --target server --config Release
//go:generate cmd /c move gguf\build\cuda\bin\Release\server.exe gguf\build\cuda\bin\Release\ollama-runner.exe