[Bugfix] Fix null modules_to_not_convert in FBGEMM Fp8 quantization (#6665)

This commit is contained in:
Cheng Li 2024-07-22 19:25:05 -07:00 committed by GitHub
parent e0c15758b8
commit c5e8330997
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@@ -31,7 +31,7 @@ class FBGEMMFp8Config(QuantizationConfig):
     """Config class for FBGEMM Fp8."""

     def __init__(self, ignore_list: List[str], input_scale_ub: float):
-        self.ignore_list = ignore_list
+        self.ignore_list = ignore_list if ignore_list else []
         self.input_scale_ub = input_scale_ub
         # For GPUs that lack FP8 hardware support, we can leverage the Marlin