From fa0c114fad4e2b807503e78d5110558cfee92ba4 Mon Sep 17 00:00:00 2001
From: youkaichao
Date: Tue, 17 Sep 2024 16:24:06 -0700
Subject: [PATCH] [doc] improve installation doc (#8550)

Co-authored-by: Andy Dai <76841985+Imss27@users.noreply.github.com>
---
 docs/source/getting_started/installation.rst | 2 ++
 tests/compile/test_full_graph.py             | 5 ++++-
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/docs/source/getting_started/installation.rst b/docs/source/getting_started/installation.rst
index 50a761b4..0322503a 100644
--- a/docs/source/getting_started/installation.rst
+++ b/docs/source/getting_started/installation.rst
@@ -95,6 +95,8 @@ You can also build and install vLLM from source:
         $ export MAX_JOBS=6
         $ pip install -e .
 
+    This is especially useful when you are building on less powerful machines. For example, WSL only `gives you half of the memory by default `_, so it is recommended to use ``export MAX_JOBS=1`` to avoid compiling multiple files simultaneously and running out of memory. The side effect is that the build process will be much slower. If you only touch the Python code, the slow compilation is acceptable: since you are building in editable mode, you can change the code and run the Python script without any re-compilation or re-installation.
+
 .. tip::
     If you have trouble building vLLM, we recommend using the NVIDIA PyTorch Docker image.
 
diff --git a/tests/compile/test_full_graph.py b/tests/compile/test_full_graph.py
index 6fc44553..2e309aaa 100644
--- a/tests/compile/test_full_graph.py
+++ b/tests/compile/test_full_graph.py
@@ -28,7 +28,10 @@ def test_full_graph(model, tp_size):
         "The future of AI is",
     ]
     sampling_params = SamplingParams(temperature=0)
-    llm = LLM(model=model, enforce_eager=True, tensor_parallel_size=tp_size)
+    llm = LLM(model=model,
+              enforce_eager=True,
+              tensor_parallel_size=tp_size,
+              disable_custom_all_reduce=True)
 
     outputs = llm.generate(prompts, sampling_params)
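
As a concrete illustration of the ``MAX_JOBS`` advice in the documentation change above, here is a minimal, hypothetical Python helper (not part of the patch or of vLLM) that picks ``MAX_JOBS`` from the memory currently available before running the editable install. The one-compile-job-per-4-GiB heuristic is an assumption for illustration only:

    # Hypothetical helper, not part of vLLM: choose MAX_JOBS from free memory
    # before building from source. Assumes Linux/WSL; ~4 GiB per compile job.
    import os
    import subprocess

    free_gib = os.sysconf("SC_AVPHYS_PAGES") * os.sysconf("SC_PAGE_SIZE") / 2**30
    os.environ["MAX_JOBS"] = str(max(1, int(free_gib // 4)))

    # Editable install: subsequent Python-only changes need no rebuild.
    subprocess.run(["pip", "install", "-e", "."], check=True)

On a memory-constrained WSL setup this degenerates to the safe ``MAX_JOBS=1`` recommended above: one file compiles at a time, trading build speed for peak memory.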