From 57f560aa23077ed9def5952ab81a65bc080ae234 Mon Sep 17 00:00:00 2001
From: Aditya Paliwal
Date: Mon, 5 Aug 2024 09:26:14 -0700
Subject: [PATCH] [BugFix] Use args.trust_remote_code (#7121)

---
 vllm/entrypoints/openai/api_server.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/vllm/entrypoints/openai/api_server.py b/vllm/entrypoints/openai/api_server.py
index a0190f3d..88f0bd4e 100644
--- a/vllm/entrypoints/openai/api_server.py
+++ b/vllm/entrypoints/openai/api_server.py
@@ -60,11 +60,11 @@ logger = init_logger('vllm.entrypoints.openai.api_server')
 
 _running_tasks: Set[asyncio.Task] = set()
 
 
-def model_is_embedding(model_name: str) -> bool:
+def model_is_embedding(model_name: str, trust_remote_code: bool) -> bool:
     return ModelConfig(model=model_name,
                        tokenizer=model_name,
                        tokenizer_mode="auto",
-                       trust_remote_code=False,
+                       trust_remote_code=trust_remote_code,
                        seed=0,
                        dtype="float16").embedding_mode
@@ -97,7 +97,7 @@ async def build_async_engine_client(args) -> AsyncIterator[AsyncEngineClient]:
 
     # If manually triggered or embedding model, use AsyncLLMEngine in process.
     # TODO: support embedding model via RPC.
-    if (model_is_embedding(args.model)
+    if (model_is_embedding(args.model, args.trust_remote_code)
             or args.disable_frontend_multiprocessing):
         async_engine_client = AsyncLLMEngine.from_engine_args(
             engine_args, usage_context=UsageContext.OPENAI_API_SERVER)
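
Below is a minimal illustrative sketch, not part of the patch, showing what the fix changes. It mirrors the patched helper; the model name in the trailing comment is hypothetical, and the import path assumes ModelConfig is exposed from vllm.config as in vLLM releases from this period.

# Sketch of the patched helper (assumption: ModelConfig is importable from vllm.config).
from vllm.config import ModelConfig

def model_is_embedding(model_name: str, trust_remote_code: bool) -> bool:
    # The server's --trust-remote-code flag is now forwarded into the probe
    # ModelConfig instead of being hard-coded to False.
    return ModelConfig(model=model_name,
                       tokenizer=model_name,
                       tokenizer_mode="auto",
                       trust_remote_code=trust_remote_code,
                       seed=0,
                       dtype="float16").embedding_mode

# Before this fix, starting the server as, e.g.,
#   python -m vllm.entrypoints.openai.api_server \
#       --model some-org/custom-embedding-model --trust-remote-code
# still built this probe ModelConfig with trust_remote_code=False, which can
# fail for models whose Hugging Face config requires executing custom code.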