[Core] Refactor gptq_marlin ops (#4466)

commit 26f2fb5113
parent fa32207842
@@ -167,6 +167,22 @@ def aqlm_dequant(codes: torch.Tensor, codebooks: torch.Tensor,
     return vllm_ops.aqlm_dequant(codes, codebooks, codebook_partition_sizes)
 
 
+# gptq_marlin
+def gptq_marlin_repack(b_q_weight: torch.Tensor, perm: torch.Tensor,
+                       size_k: int, size_n: int) -> torch.Tensor:
+    return vllm_ops.gptq_marlin_repack(b_q_weight, perm, size_k, size_n)
+
+
+def gptq_marlin_gemm(a: torch.Tensor, b_q_weight: torch.Tensor,
+                     b_scales: torch.Tensor, g_idx: torch.Tensor,
+                     perm: torch.Tensor, workspace: torch.Tensor, size_m: int,
+                     size_n: int, size_k: int,
+                     is_k_full: bool) -> torch.Tensor:
+    return vllm_ops.gptq_marlin_gemm(a, b_q_weight, b_scales, g_idx, perm,
+                                     workspace, size_m, size_n, size_k,
+                                     is_k_full)
+
+
 # fp8
 def scaled_fp8_quant(
     input: torch.Tensor,
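For orientation (not part of the commit): a minimal caller-side sketch of how the new wrappers might be invoked. It assumes a CUDA build of vLLM that exposes these ops, 4-bit GPTQ weights with group size 128 and no activation reordering (so g_idx and perm are empty), and illustrative problem sizes. The synthetic tensors stand in for data a real GPTQ checkpoint would provide, and the workspace sizing mirrors the pattern used by vLLM's gptq_marlin layer (also an assumption), so the snippet shows the calling convention rather than meaningful numerics.

import torch

from vllm import _custom_ops as ops  # new Python-level wrapper module

# Illustrative sizes and layout assumptions (not taken from the commit).
size_m, size_n, size_k = 16, 4096, 4096
pack_factor = 8          # eight 4-bit values packed per int32
group_size = 128
device = "cuda"

a = torch.randn(size_m, size_k, dtype=torch.float16, device=device)

# Synthetic stand-ins for GPTQ checkpoint data; real values come from a
# quantized model, so the numerical result below is meaningless.
b_q_weight = torch.randint(0, 2**31 - 1, (size_k // pack_factor, size_n),
                           dtype=torch.int32, device=device)
b_scales = torch.rand(size_k // group_size, size_n,
                      dtype=torch.float16, device=device)
g_idx = torch.empty(0, dtype=torch.int32, device=device)  # no act-order
perm = torch.empty(0, dtype=torch.int32, device=device)

# Repack the GPTQ layout into the Marlin layout, then run the fused GEMM.
marlin_q_weight = ops.gptq_marlin_repack(b_q_weight, perm, size_k, size_n)

# Caller-allocated scratch workspace (sizing assumed, see note above).
workspace = torch.zeros(size_n // 64 * 16, dtype=torch.int32, device=device)

out = ops.gptq_marlin_gemm(a, marlin_q_weight, b_scales, g_idx, perm,
                           workspace, size_m, size_n, size_k, is_k_full=True)
print(out.shape)  # torch.Size([16, 4096])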
@@ -6,7 +6,7 @@ import numpy
 import torch
 from torch.nn.parameter import Parameter
 
-from vllm._C import ops
+from vllm import _custom_ops as ops
 from vllm.model_executor.layers.linear import (LinearBase, LinearMethodBase,
                                                set_weight_attrs)
 from vllm.model_executor.layers.quantization.base_config import (
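The second hunk shows the accompanying call-site change: quantization layers now import the Python-level wrapper module instead of binding to the compiled extension directly. A minimal before/after sketch (the module path vllm._custom_ops is taken from the import line above; nothing else is assumed):

# Before this commit: call sites imported the compiled extension directly.
# from vllm._C import ops

# After this commit: call sites go through the thin Python wrappers shown in
# the first hunk, which forward to the compiled kernels.
from vllm import _custom_ops as ops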