From 991143cfcdc57d658d312bc001dd6d6dffba9495 Mon Sep 17 00:00:00 2001
From: Nick Hill
Date: Fri, 29 Mar 2024 16:26:44 -0700
Subject: [PATCH] [BugFix] Use consistent logger everywhere (#3738)

---
 vllm/lora/models.py                                | 4 ++--
 vllm/lora/utils.py                                 | 5 +++--
 vllm/lora/worker_manager.py                        | 4 ++--
 vllm/model_executor/parallel_utils/pynccl.py       | 5 +++--
 vllm/model_executor/parallel_utils/pynccl_utils.py | 5 +++--
 5 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/vllm/lora/models.py b/vllm/lora/models.py
index ddbdd50d..945917a5 100644
--- a/vllm/lora/models.py
+++ b/vllm/lora/models.py
@@ -1,6 +1,5 @@
 import copy
 import json
-import logging
 import math
 import os
 import re
@@ -11,13 +10,14 @@
 import torch
 from torch import nn
 
 from vllm.config import LoRAConfig
+from vllm.logger import init_logger
 from vllm.lora.layers import (BaseLayerWithLoRA, LoRAMapping, from_layer,
                               from_layer_logits_processor)
 from vllm.lora.lora import LoRALayerWeights, PackedLoRALayerWeights
 from vllm.lora.utils import parse_fine_tuned_lora_name, replace_submodule
 from vllm.utils import LRUCache, is_pin_memory_available
 
-logger = logging.getLogger(__name__)
+logger = init_logger(__name__)
 
 _GLOBAL_LORA_ID = 0
diff --git a/vllm/lora/utils.py b/vllm/lora/utils.py
index f67a3812..39e08f04 100644
--- a/vllm/lora/utils.py
+++ b/vllm/lora/utils.py
@@ -1,9 +1,10 @@
-import logging
 from typing import Tuple
 
 from torch import nn
 
-logger = logging.getLogger(__name__)
+from vllm.logger import init_logger
+
+logger = init_logger(__name__)
 
 
 def replace_submodule(model: nn.Module, module_name: str,
diff --git a/vllm/lora/worker_manager.py b/vllm/lora/worker_manager.py
index 840f1b51..3224b3a9 100644
--- a/vllm/lora/worker_manager.py
+++ b/vllm/lora/worker_manager.py
@@ -1,16 +1,16 @@
-import logging
 from abc import ABC, abstractmethod, abstractproperty
 from typing import Any, Dict, List, Optional, Set, Type
 
 import torch
 
 from vllm.config import LoRAConfig
+from vllm.logger import init_logger
 from vllm.lora.layers import LoRAMapping
 from vllm.lora.models import (LoRAModel, LoRAModelManager,
                               LRUCacheLoRAModelManager, create_lora_manager)
 from vllm.lora.request import LoRARequest
 
-logger = logging.getLogger(__name__)
+logger = init_logger(__name__)
 
 
 class AbstractWorkerLoRAManager(ABC):
diff --git a/vllm/model_executor/parallel_utils/pynccl.py b/vllm/model_executor/parallel_utils/pynccl.py
index 5d7f2fdc..2aed70f0 100644
--- a/vllm/model_executor/parallel_utils/pynccl.py
+++ b/vllm/model_executor/parallel_utils/pynccl.py
@@ -21,7 +21,6 @@
 
 import ctypes
 import datetime
-import logging
 import os
 
 # ===================== import region =====================
@@ -29,7 +28,9 @@ import torch
 import torch.distributed as dist
 from torch.distributed import ReduceOp
 
-logger = logging.getLogger(__name__)
+from vllm.logger import init_logger
+
+logger = init_logger(__name__)
 
 so_file = os.environ.get("VLLM_NCCL_SO_PATH", "")
 
diff --git a/vllm/model_executor/parallel_utils/pynccl_utils.py b/vllm/model_executor/parallel_utils/pynccl_utils.py
index 45915b49..a099777a 100644
--- a/vllm/model_executor/parallel_utils/pynccl_utils.py
+++ b/vllm/model_executor/parallel_utils/pynccl_utils.py
@@ -1,11 +1,12 @@
 import contextlib
-import logging
 from typing import Optional
 
 import torch
 from torch.distributed import ReduceOp
 
-logger = logging.getLogger(__name__)
+from vllm.logger import init_logger
+
+logger = init_logger(__name__)
 
 try:
     from vllm.model_executor.parallel_utils.pynccl import (NCCLCommunicator,
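
Note: for context, the before/after pattern this patch applies uniformly in every touched module is sketched below. This is a minimal illustration, not part of the patch; the logger.info call and its message are hypothetical.

    # Before: each module created a bare stdlib logger, which bypassed
    # the handlers and formatting configured in vllm/logger.py.
    # import logging
    # logger = logging.getLogger(__name__)

    # After: every module obtains its logger through vLLM's shared
    # factory, so log output is formatted consistently project-wide.
    from vllm.logger import init_logger

    logger = init_logger(__name__)
    logger.info("example message")  # hypothetical usage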