diff --git a/vllm/model_executor/models/bloom.py b/vllm/model_executor/models/bloom.py
index b425af48..1d7e5d25 100644
--- a/vllm/model_executor/models/bloom.py
+++ b/vllm/model_executor/models/bloom.py
@@ -139,7 +139,6 @@ class BloomMLP(nn.Module):
             4 * hidden_size,
             quant_config=quant_config,
         )
-        quant_config = getattr(quant_config, "quant_config", None)
         self.gelu_impl = get_act_fn("gelu", quant_config, 4 * hidden_size)
         self.dense_4h_to_h = RowParallelLinear(
             4 * hidden_size,
diff --git a/vllm/model_executor/models/falcon.py b/vllm/model_executor/models/falcon.py
index 4be1f064..08dd6992 100644
--- a/vllm/model_executor/models/falcon.py
+++ b/vllm/model_executor/models/falcon.py
@@ -203,7 +203,6 @@ class FalconMLP(nn.Module):
                                           bias=config.bias,
                                           skip_bias_add=True,
                                           quant_config=quant_config)
-        quant_config = getattr(quant_config, "quant_config", None)
         self.act = get_act_fn("gelu", quant_config, 4 * hidden_size)
         self.reduce_row_parallel_results = not (config.new_decoder_architecture
                                                 or config.parallel_attn)
diff --git a/vllm/model_executor/models/gpt2.py b/vllm/model_executor/models/gpt2.py
index ac1dce6d..75eaebf0 100644
--- a/vllm/model_executor/models/gpt2.py
+++ b/vllm/model_executor/models/gpt2.py
@@ -107,7 +107,6 @@ class GPT2MLP(nn.Module):
             bias=True,
             quant_config=quant_config,
         )
-        quant_config = getattr(quant_config, "quant_config", None)
         self.act = get_act_fn(config.activation_function, quant_config,
                               intermediate_size)
 
diff --git a/vllm/model_executor/models/gpt_bigcode.py b/vllm/model_executor/models/gpt_bigcode.py
index e52ac679..d057fd92 100644
--- a/vllm/model_executor/models/gpt_bigcode.py
+++ b/vllm/model_executor/models/gpt_bigcode.py
@@ -128,7 +128,6 @@ class GPTBigMLP(nn.Module):
             bias=True,
             quant_config=quant_config,
         )
-        quant_config = getattr(quant_config, "quant_config", None)
         self.act = get_act_fn(config.activation_function, quant_config,
                               intermediate_size)
 
diff --git a/vllm/model_executor/models/gpt_j.py b/vllm/model_executor/models/gpt_j.py
index 287f4186..8d7fe8a5 100644
--- a/vllm/model_executor/models/gpt_j.py
+++ b/vllm/model_executor/models/gpt_j.py
@@ -120,7 +120,6 @@ class GPTJMLP(nn.Module):
             hidden_size,
             quant_config=quant_config,
         )
-        quant_config = getattr(quant_config, "quant_config", None)
         self.act = get_act_fn(config.activation_function, quant_config,
                               intermediate_size)
 
diff --git a/vllm/model_executor/models/gpt_neox.py b/vllm/model_executor/models/gpt_neox.py
index cbc5115b..bab563b9 100644
--- a/vllm/model_executor/models/gpt_neox.py
+++ b/vllm/model_executor/models/gpt_neox.py
@@ -119,7 +119,6 @@ class GPTNeoXMLP(nn.Module):
             config.hidden_size,
             quant_config=quant_config,
         )
-        quant_config = getattr(quant_config, "quant_config", None)
         self.act = get_act_fn(config.hidden_act, quant_config,
                               config.intermediate_size)
 
diff --git a/vllm/model_executor/models/mpt.py b/vllm/model_executor/models/mpt.py
index 8c5e7e77..6fa5c5bd 100644
--- a/vllm/model_executor/models/mpt.py
+++ b/vllm/model_executor/models/mpt.py
@@ -146,7 +146,6 @@ class MPTMLP(nn.Module):
             bias=not config.no_bias,
             quant_config=quant_config,
         )
-        quant_config = getattr(quant_config, "quant_config", None)
         self.act = get_act_fn("gelu", quant_config, intermediate_size)
         self.down_proj = RowParallelLinear(
             intermediate_size,
diff --git a/vllm/model_executor/models/opt.py b/vllm/model_executor/models/opt.py
index 838a2f0a..336f765a 100644
--- a/vllm/model_executor/models/opt.py
+++ b/vllm/model_executor/models/opt.py
@@ -130,7 +130,6 @@ class OPTDecoderLayer(nn.Module):
             bias=config.enable_bias,
             quant_config=quant_config,
         )
-        quant_config = getattr(quant_config, "quant_config", None)
         self.activation_fn = get_act_fn(config.activation_function,
                                         quant_config, config.ffn_dim)
         self.fc2 = RowParallelLinear(
diff --git a/vllm/model_executor/models/phi.py b/vllm/model_executor/models/phi.py
index 7a9b8dcd..4a458792 100644
--- a/vllm/model_executor/models/phi.py
+++ b/vllm/model_executor/models/phi.py
@@ -142,7 +142,6 @@ class PhiMLP(nn.Module):
             config.hidden_size,
             quant_config=quant_config,
         )
-        quant_config = getattr(quant_config, "quant_config", None)
         self.act = get_act_fn(config.hidden_act, quant_config, n_inner)
 
     def forward(self, hidden_states):
diff --git a/vllm/model_executor/models/starcoder2.py b/vllm/model_executor/models/starcoder2.py
index 29d887b2..33998e2a 100644
--- a/vllm/model_executor/models/starcoder2.py
+++ b/vllm/model_executor/models/starcoder2.py
@@ -136,7 +136,6 @@ class Starcoder2MLP(nn.Module):
             bias=config.use_bias,
             quant_config=quant_config,
         )
-        quant_config = getattr(quant_config, "quant_config", None)
         self.act = get_act_fn(config.hidden_act, quant_config,
                               config.intermediate_size)
 