diff --git a/vllm/entrypoints/openai/api_server.py b/vllm/entrypoints/openai/api_server.py
index a0190f3d..88f0bd4e 100644
--- a/vllm/entrypoints/openai/api_server.py
+++ b/vllm/entrypoints/openai/api_server.py
@@ -60,11 +60,11 @@ logger = init_logger('vllm.entrypoints.openai.api_server')
 _running_tasks: Set[asyncio.Task] = set()
 
 
-def model_is_embedding(model_name: str) -> bool:
+def model_is_embedding(model_name: str, trust_remote_code: bool) -> bool:
     return ModelConfig(model=model_name,
                        tokenizer=model_name,
                        tokenizer_mode="auto",
-                       trust_remote_code=False,
+                       trust_remote_code=trust_remote_code,
                        seed=0,
                        dtype="float16").embedding_mode
 
@@ -97,7 +97,7 @@ async def build_async_engine_client(args) -> AsyncIterator[AsyncEngineClient]:
 
     # If manually triggered or embedding model, use AsyncLLMEngine in process.
     # TODO: support embedding model via RPC.
-    if (model_is_embedding(args.model)
+    if (model_is_embedding(args.model, args.trust_remote_code)
         or args.disable_frontend_multiprocessing):
         async_engine_client = AsyncLLMEngine.from_engine_args(
             engine_args, usage_context=UsageContext.OPENAI_API_SERVER)