From 8e1529dc573c9b4697fca24944918b8d68fd5906 Mon Sep 17 00:00:00 2001
From: "Chendi.Xue"
Date: Sat, 9 Nov 2024 00:26:52 -0600
Subject: [PATCH] [CI/Build] Add run-hpu-test.sh script (#10167)

Signed-off-by: Chendi.Xue
---
 .buildkite/run-hpu-test.sh | 16 ++++++++++++++++
 Dockerfile.hpu             |  2 ++
 2 files changed, 18 insertions(+)
 create mode 100644 .buildkite/run-hpu-test.sh

diff --git a/.buildkite/run-hpu-test.sh b/.buildkite/run-hpu-test.sh
new file mode 100644
index 00000000..4505dc7a
--- /dev/null
+++ b/.buildkite/run-hpu-test.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+# This script builds the HPU docker image and runs the offline inference inside the container.
+# It serves as a sanity check for compilation and basic model usage.
+set -ex
+
+# Try building the docker image
+docker build -t hpu-test-env -f Dockerfile.hpu .
+
+# Setup cleanup
+remove_docker_container() { docker rm -f hpu-test || true; }
+trap remove_docker_container EXIT
+remove_docker_container
+
+# Run the image and launch offline inference
+docker run --runtime=habana --name=hpu-test --network=host -e VLLM_SKIP_WARMUP=true --entrypoint="" hpu-test-env python3 examples/offline_inference.py
diff --git a/Dockerfile.hpu b/Dockerfile.hpu
index f481c8c6..d18fc016 100644
--- a/Dockerfile.hpu
+++ b/Dockerfile.hpu
@@ -13,4 +13,6 @@ RUN VLLM_TARGET_DEVICE=hpu python3 setup.py install
 
 WORKDIR /workspace/
 
+RUN ln -s /workspace/vllm/tests && ln -s /workspace/vllm/examples && ln -s /workspace/vllm/benchmarks
+
 ENTRYPOINT ["python3", "-m", "vllm.entrypoints.openai.api_server"]