[Bugfix] Fix null modules_to_not_convert in FBGEMM Fp8 quantization (#6665)
parent e0c15758b8
commit c5e8330997
@@ -31,7 +31,7 @@ class FBGEMMFp8Config(QuantizationConfig):
     """Config class for FBGEMM Fp8."""

     def __init__(self, ignore_list: List[str], input_scale_ub: float):
-        self.ignore_list = ignore_list
+        self.ignore_list = ignore_list if ignore_list else []
         self.input_scale_ub = input_scale_ub

         # For GPUs that lack FP8 hardware support, we can leverage the Marlin
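For context, here is a minimal sketch of the failure mode this one-line change guards against. The class and method names (`FakeFp8Config`, `is_ignored`) are hypothetical illustrations, not vLLM's API: if a checkpoint serializes `modules_to_not_convert` as `null`, the constructor receives `None`, and any later membership check on the ignore list raises a `TypeError`; coercing the value to `[]` in `__init__` makes those checks safe.

```python
# Minimal sketch (hypothetical names, not the vLLM implementation):
# why a None ignore list breaks downstream membership checks, and how
# defaulting to [] in the constructor avoids it.
from typing import List, Optional


class FakeFp8Config:
    def __init__(self, ignore_list: Optional[List[str]], input_scale_ub: float):
        # Checkpoints may store "modules_to_not_convert" as null, so the
        # constructor can receive None; fall back to an empty list.
        self.ignore_list = ignore_list if ignore_list else []
        self.input_scale_ub = input_scale_ub

    def is_ignored(self, layer_name: str) -> bool:
        # With self.ignore_list == None this would raise
        # "TypeError: argument of type 'NoneType' is not iterable".
        return layer_name in self.ignore_list


# Example: a None ignore list no longer crashes the lookup.
cfg = FakeFp8Config(ignore_list=None, input_scale_ub=1200.0)
assert cfg.is_ignored("model.layers.0.mlp.gate_proj") is False
```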