[Bugfix] Add init_cached_hf_modules to RayWorkerWrapper (#4286)

DefTruth 2024-04-24 00:28:35 +08:00 committed by GitHub
parent d3c8180ac4
commit d87f39e9a9
2 changed files with 8 additions and 1 deletion

vllm/executor/ray_gpu_executor.py

@@ -100,6 +100,7 @@ class RayGPUExecutor(ExecutorBase):
             )(RayWorkerWrapper).remote(
                 worker_module_name="vllm.worker.worker",
                 worker_class_name="Worker",
+                trust_remote_code=self.model_config.trust_remote_code,
             )
 
             worker_ip = ray.get(worker.get_node_ip.remote())
@@ -110,6 +111,7 @@ class RayGPUExecutor(ExecutorBase):
                 self.driver_worker = RayWorkerWrapper(
                     worker_module_name="vllm.worker.worker",
                     worker_class_name="Worker",
+                    trust_remote_code=self.model_config.trust_remote_code,
                 )
             else:
                 # Else, added to the list of workers.

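For context: the executor-side change above only threads the engine's existing trust_remote_code setting through to every RayWorkerWrapper that RayGPUExecutor creates, both the remote Ray actors and the local driver worker. A hedged usage sketch of the kind of configuration this affects; the model id is purely illustrative:

from vllm import LLM

# Illustrative setup: a model that ships custom modeling code (hence
# trust_remote_code=True) run with tensor parallelism, which with the Ray
# backend makes vLLM spawn Ray workers. The new constructor argument carries
# the flag into those workers.
llm = LLM(
    model="org/model-with-remote-code",  # hypothetical model id
    trust_remote_code=True,
    tensor_parallel_size=2,
)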
vllm/worker/worker_base.py

@@ -103,10 +103,15 @@ class WorkerWrapperBase:
 
     def __init__(self,
                  worker_module_name=None,
-                 worker_class_name=None) -> None:
+                 worker_class_name=None,
+                 trust_remote_code: bool = False) -> None:
         self.worker_module_name = worker_module_name
         self.worker_class_name = worker_class_name
         self.worker = None
+        if trust_remote_code:
+            # note: lazy import to avoid importing torch before initializing
+            from vllm.utils import init_cached_hf_modules
+            init_cached_hf_modules()
 
     @staticmethod
     def update_environment_variables(envs: Dict[str, str]) -> None:
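
The wrapper-side change defers both the decision and the import to worker construction time: only when trust_remote_code is set does the wrapper touch vllm.utils, keeping torch out of the import path until then (per the in-code comment). A minimal sketch of what init_cached_hf_modules is assumed to do here; the body below is an approximation that delegates to transformers' dynamic-module setup, not necessarily the exact implementation:

# Sketch (assumption): prepare the Hugging Face dynamic-modules cache so that
# models relying on trust_remote_code can import their custom code in this
# process.
def init_cached_hf_modules() -> None:
    from transformers.dynamic_module_utils import init_hf_modules
    init_hf_modules()  # creates the modules cache dir and puts it on sys.path

Running this once per worker process is what the fix ensures for Ray workers, per the commit title.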