[BugFix] Use consistent logger everywhere (#3738)
This commit is contained in:
parent 8b2d3cbc1b
commit 991143cfcd
vllm/lora/models.py
@@ -1,6 +1,5 @@
 import copy
 import json
-import logging
 import math
 import os
 import re
@@ -11,13 +10,14 @@ import torch
 from torch import nn
 
 from vllm.config import LoRAConfig
+from vllm.logger import init_logger
 from vllm.lora.layers import (BaseLayerWithLoRA, LoRAMapping, from_layer,
                               from_layer_logits_processor)
 from vllm.lora.lora import LoRALayerWeights, PackedLoRALayerWeights
 from vllm.lora.utils import parse_fine_tuned_lora_name, replace_submodule
 from vllm.utils import LRUCache, is_pin_memory_available
 
-logger = logging.getLogger(__name__)
+logger = init_logger(__name__)
 
 _GLOBAL_LORA_ID = 0
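Every hunk in this commit makes the same substitution: the ad-hoc logging.getLogger(__name__) is replaced by vllm.logger.init_logger, so every module logs through one shared configuration. For context, a minimal sketch of what such a helper typically does; the format string, level, and output stream here are illustrative assumptions, not vLLM's exact implementation:

    import logging
    import sys

    _FORMAT = "%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s"

    # One handler shared by every logger the helper hands out, so all
    # modules emit with the same format to the same stream.
    _handler = logging.StreamHandler(sys.stdout)
    _handler.setFormatter(logging.Formatter(_FORMAT))


    def init_logger(name: str) -> logging.Logger:
        # Attach the shared handler and stop propagation so records are
        # not re-formatted by whatever handler the host application put
        # on the root logger.
        logger = logging.getLogger(name)
        logger.setLevel(logging.DEBUG)
        logger.addHandler(_handler)
        logger.propagate = False
        return logger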
vllm/lora/utils.py
@@ -1,9 +1,10 @@
-import logging
 from typing import Tuple
 
 from torch import nn
 
-logger = logging.getLogger(__name__)
+from vllm.logger import init_logger
+
+logger = init_logger(__name__)
 
 
 def replace_submodule(model: nn.Module, module_name: str,
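The mix of styles was more than cosmetic. A bare logging.getLogger(__name__) attaches no handler, so its records propagate to the root logger and get the host application's formatting (or vanish if the root is unconfigured), while init_logger-style loggers emit through vLLM's own handler. A self-contained illustration of the two behaviors coexisting in one process (module names are hypothetical):

    import logging
    import sys

    # The embedding application configures the root logger its own way.
    logging.basicConfig(format="app: %(message)s", level=logging.INFO)

    # Pre-fix style: no handler of its own, records propagate to root.
    a = logging.getLogger("vllm.module_a")

    # init_logger style: dedicated handler, propagation disabled.
    b = logging.getLogger("vllm.module_b")
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(logging.Formatter("vllm: %(message)s"))
    b.addHandler(handler)
    b.propagate = False

    a.info("hello")  # prints "app: hello"
    b.info("hello")  # prints "vllm: hello"

With both patterns in one process, the same kind of event can surface in two different formats, which is exactly the inconsistency this commit removes.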
vllm/lora/worker_manager.py
@@ -1,16 +1,16 @@
-import logging
 from abc import ABC, abstractmethod, abstractproperty
 from typing import Any, Dict, List, Optional, Set, Type
 
 import torch
 
 from vllm.config import LoRAConfig
+from vllm.logger import init_logger
 from vllm.lora.layers import LoRAMapping
 from vllm.lora.models import (LoRAModel, LoRAModelManager,
                               LRUCacheLoRAModelManager, create_lora_manager)
 from vllm.lora.request import LoRARequest
 
-logger = logging.getLogger(__name__)
+logger = init_logger(__name__)
 
 
 class AbstractWorkerLoRAManager(ABC):
vllm/model_executor/parallel_utils/pynccl.py
@@ -21,7 +21,6 @@
 
 import ctypes
 import datetime
-import logging
 import os
 
 # ===================== import region =====================
@@ -29,7 +28,9 @@ import torch
 import torch.distributed as dist
 from torch.distributed import ReduceOp
 
-logger = logging.getLogger(__name__)
+from vllm.logger import init_logger
+
+logger = init_logger(__name__)
 
 so_file = os.environ.get("VLLM_NCCL_SO_PATH", "")
 
vllm/model_executor/parallel_utils/pynccl_utils.py
@@ -1,11 +1,12 @@
 import contextlib
-import logging
 from typing import Optional
 
 import torch
 from torch.distributed import ReduceOp
 
-logger = logging.getLogger(__name__)
+from vllm.logger import init_logger
+
+logger = init_logger(__name__)
 
 try:
     from vllm.model_executor.parallel_utils.pynccl import (NCCLCommunicator,
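After a sweep like this, a lightweight CI guard can keep new direct getLogger calls from creeping back in. A sketch, assuming the package lives under vllm/ and that vllm/logger.py is the only module allowed to touch the stdlib directly (neither assumption comes from this commit):

    import pathlib
    import re
    import sys

    # Fail if any module builds its logger straight from the stdlib
    # instead of going through vllm.logger.init_logger.
    PATTERN = re.compile(r"logging\.getLogger\(")

    offenders = [
        str(path)
        for path in pathlib.Path("vllm").rglob("*.py")
        if path.name != "logger.py" and PATTERN.search(path.read_text())
    ]
    if offenders:
        print("direct logging.getLogger() calls found in:")
        print("\n".join(f"  {p}" for p in offenders))
        sys.exit(1)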