From c3af44722cff56bba5fc912c8e16d9de02dfb532 Mon Sep 17 00:00:00 2001
From: Kuntai Du
Date: Mon, 20 May 2024 13:16:57 -0700
Subject: [PATCH] [Doc] Add documentation to benchmarking script when running
 TGI (#4920)

---
 benchmarks/benchmark_serving.py | 4 ++++
 benchmarks/launch_tgi_server.sh | 2 +-
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/benchmarks/benchmark_serving.py b/benchmarks/benchmark_serving.py
index 2c2d69da..9c3fed48 100644
--- a/benchmarks/benchmark_serving.py
+++ b/benchmarks/benchmark_serving.py
@@ -17,6 +17,10 @@ On the client side, run:
         --dataset-path <path to dataset> \
         --request-rate <request_rate> \ # By default <request_rate> is inf
         --num-prompts <num_prompts> # By default <num_prompts> is 1000
+
+    When using the TGI backend, add
+    --endpoint /generate_stream
+    to the end of the command above.
 """
 import argparse
 import asyncio
diff --git a/benchmarks/launch_tgi_server.sh b/benchmarks/launch_tgi_server.sh
index 64d3c4f4..f491c90d 100755
--- a/benchmarks/launch_tgi_server.sh
+++ b/benchmarks/launch_tgi_server.sh
@@ -4,7 +4,7 @@
 PORT=8000
 MODEL=$1
 TOKENS=$2
-docker run --gpus all --shm-size 1g -p $PORT:80 \
+docker run -e HF_TOKEN=$HF_TOKEN --gpus all --shm-size 1g -p $PORT:80 \
     -v $PWD/data:/data \
     ghcr.io/huggingface/text-generation-inference:1.4.0 \
     --model-id $MODEL \
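
Usage note (not part of the patch): a minimal end-to-end sketch of how the two changes fit together once the patch is applied. The model name, token budget, and dataset path are illustrative placeholders; --endpoint, --dataset-path, and --num-prompts come from the docstring shown in the hunk above, while --backend, --model, and --dataset-name are assumed from benchmark_serving.py's argument parser.

    # Export a Hugging Face token so the patched launcher can pass it into
    # the TGI container (needed for gated models); server listens on port 8000.
    export HF_TOKEN=<your Hugging Face token>
    bash benchmarks/launch_tgi_server.sh meta-llama/Llama-2-7b-hf 4096

    # Benchmark against TGI; per the docstring addition above, the TGI
    # backend requires --endpoint /generate_stream at the end of the command.
    python benchmarks/benchmark_serving.py \
        --backend tgi \
        --model meta-llama/Llama-2-7b-hf \
        --dataset-name sharegpt \
        --dataset-path <path to dataset> \
        --num-prompts 1000 \
        --endpoint /generate_stream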