[BugFix] Fix return type of executor execute_model methods (#4402)

Author: Nick Hill (2024-04-27 11:17:45 -07:00), committed by GitHub
parent d6e520e170
commit ba4be44c32
6 changed files with 9 additions and 8 deletions
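
This is an annotation-only change: each executor's execute_model /
execute_model_async already returns a list of SamplerOutput objects
(normally a single element; lookahead or multi-step execution, as used by
speculative decoding, can produce one per executed step), so the declared
return type is corrected from SamplerOutput to List[SamplerOutput]. A
minimal sketch of the contract a caller can now rely on — the consuming
function here is hypothetical, not part of this commit:

    from typing import List

    from vllm.sequence import SamplerOutput

    def consume_step_outputs(outputs: List[SamplerOutput]) -> None:
        # Usually a single element; lookahead/multi-step execution can
        # return one SamplerOutput per step run in a single call.
        for step_output in outputs:
            ...  # process one step's sampling results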

vllm/executor/cpu_executor.py

@@ -109,7 +109,7 @@ class CPUExecutorAsync(CPUExecutor, ExecutorAsyncBase):
         blocks_to_swap_in: Dict[int, int],
         blocks_to_swap_out: Dict[int, int],
         blocks_to_copy: Dict[int, List[int]],
-    ) -> SamplerOutput:
+    ) -> List[SamplerOutput]:
         output = await make_async(self.driver_worker.execute_model)(
             seq_group_metadata_list=seq_group_metadata_list,
             blocks_to_swap_in=blocks_to_swap_in,

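The async executors in this commit (CPU, GPU, and Neuron) all wrap the
blocking driver_worker.execute_model call with make_async from vllm.utils.
For reference, a minimal sketch of such a helper, assuming the standard
run-in-executor pattern rather than vLLM's exact implementation:

    import asyncio
    from functools import partial
    from typing import Awaitable, Callable, TypeVar

    T = TypeVar("T")

    def make_async(func: Callable[..., T]) -> Callable[..., Awaitable[T]]:
        """Run a blocking callable on the default thread pool so the
        caller can await it without stalling the event loop."""

        def _async_wrapper(*args, **kwargs) -> Awaitable[T]:
            loop = asyncio.get_event_loop()
            return loop.run_in_executor(None, partial(func, *args, **kwargs))

        return _async_wrapper
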
vllm/executor/distributed_gpu_executor.py

@@ -1,5 +1,5 @@
 from abc import abstractmethod
-from typing import Any, Dict, Optional, Set, Tuple
+from typing import Any, Dict, List, Optional, Set, Tuple
 
 from vllm.executor.executor_base import ExecutorAsyncBase
 from vllm.executor.gpu_executor import GPUExecutor
@@ -52,7 +52,7 @@ class DistributedGPUExecutor(GPUExecutor):
                           num_gpu_blocks=num_gpu_blocks,
                           num_cpu_blocks=num_cpu_blocks)
 
-    def execute_model(self, *args, **kwargs) -> SamplerOutput:
+    def execute_model(self, *args, **kwargs) -> List[SamplerOutput]:
         all_outputs = self._run_workers("execute_model",
                                         driver_args=args,
                                         driver_kwargs=kwargs)
@@ -105,7 +105,8 @@ class DistributedGPUExecutorAsync(DistributedGPUExecutor, ExecutorAsyncBase):
         """Runs the given method on all workers."""
         raise NotImplementedError
 
-    async def execute_model_async(self, *args, **kwargs) -> SamplerOutput:
+    async def execute_model_async(self, *args,
+                                  **kwargs) -> List[SamplerOutput]:
         all_outputs = await self._run_workers_async("execute_model",
                                                     driver_args=args,
                                                     driver_kwargs=kwargs)

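In both distributed wrappers, _run_workers / _run_workers_async fan the
call out and collect one result per worker; only the driver worker
computes sampling results, so the method returns a single
List[SamplerOutput] rather than a list of lists (the lines truncated from
these hunks return all_outputs[0]). A simplified sketch of that
aggregation, with _run_workers stubbed out as an assumption rather than
the real implementation:

    from typing import Any, List

    from vllm.sequence import SamplerOutput

    def _run_workers(method: str, *args: Any, **kwargs: Any) -> List[Any]:
        """Hypothetical stand-in: invoke `method` on the driver and all
        remote workers, returning the driver worker's result first."""
        raise NotImplementedError

    def execute_model(*args: Any, **kwargs: Any) -> List[SamplerOutput]:
        all_outputs = _run_workers("execute_model", *args, **kwargs)
        # Only the driver worker returns sampling results, so the
        # per-worker list collapses to its first entry.
        return all_outputs[0]
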
vllm/executor/executor_base.py

@@ -112,7 +112,7 @@ class ExecutorAsyncBase(ExecutorBase):
         blocks_to_swap_in: Dict[int, int],
         blocks_to_swap_out: Dict[int, int],
         blocks_to_copy: Dict[int, List[int]],
-    ) -> SamplerOutput:
+    ) -> List[SamplerOutput]:
         """Executes one model step on the given sequences."""
         raise NotImplementedError

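ExecutorAsyncBase.execute_model_async is the abstract contract that the
concrete async executors in this commit implement, so correcting the
annotation here is what keeps every override consistent under type
checking. A hedged sketch of a conforming subclass (the class name and
body are illustrative only, not code from the repository):

    from typing import Dict, List

    from vllm.executor.executor_base import ExecutorAsyncBase
    from vllm.sequence import SamplerOutput, SequenceGroupMetadata

    class ToyExecutorAsync(ExecutorAsyncBase):  # hypothetical subclass

        async def execute_model_async(
            self,
            seq_group_metadata_list: List[SequenceGroupMetadata],
            blocks_to_swap_in: Dict[int, int],
            blocks_to_swap_out: Dict[int, int],
            blocks_to_copy: Dict[int, List[int]],
        ) -> List[SamplerOutput]:
            # A conforming override returns a list even when it holds
            # only a single step's output.
            return []
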
vllm/executor/gpu_executor.py

@@ -163,7 +163,7 @@ class GPUExecutorAsync(GPUExecutor, ExecutorAsyncBase):
         blocks_to_swap_in: Dict[int, int],
         blocks_to_swap_out: Dict[int, int],
         blocks_to_copy: Dict[int, List[int]],
-    ) -> SamplerOutput:
+    ) -> List[SamplerOutput]:
         output = await make_async(self.driver_worker.execute_model)(
             seq_group_metadata_list=seq_group_metadata_list,
             blocks_to_swap_in=blocks_to_swap_in,

vllm/executor/neuron_executor.py

@@ -84,7 +84,7 @@ class NeuronExecutorAsync(NeuronExecutor, ExecutorAsyncBase):
         blocks_to_swap_in: Dict[int, int],
         blocks_to_swap_out: Dict[int, int],
         blocks_to_copy: Dict[int, List[int]],
-    ) -> SamplerOutput:
+    ) -> List[SamplerOutput]:
         output = await make_async(self.driver_worker.execute_model)(
             seq_group_metadata_list=seq_group_metadata_list, )
         return output

vllm/executor/ray_gpu_executor.py

@@ -188,7 +188,7 @@ class RayGPUExecutor(DistributedGPUExecutor):
                       blocks_to_swap_in: Dict[int, int],
                       blocks_to_swap_out: Dict[int, int],
                       blocks_to_copy: Dict[int, List[int]],
-                      num_lookahead_slots: int = 0) -> SamplerOutput:
+                      num_lookahead_slots: int = 0) -> List[SamplerOutput]:
         all_outputs = self._run_workers(
             "execute_model",
             driver_kwargs={
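
RayGPUExecutor is the one executor here whose signature also carries
num_lookahead_slots, and lookahead (multi-step) scheduling is exactly the
case where a single execute_model call can yield more than one
SamplerOutput, which the corrected List[SamplerOutput] annotation now
reflects. A hypothetical call site, with the metadata construction elided:

    # seq_group_metadata_list is assumed to come from the scheduler.
    outputs = executor.execute_model(
        seq_group_metadata_list=seq_group_metadata_list,
        blocks_to_swap_in={},
        blocks_to_swap_out={},
        blocks_to_copy={},
        num_lookahead_slots=4,  # illustrative value, not from this commit
    )
    assert isinstance(outputs, list)  # one SamplerOutput per executed step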