Merged

Changes from 1 commit

fix
MekkCyber committed Apr 9, 2025
commit b3e08ec1084fd8eb3dd465901f695f064af6a59a
2 changes: 1 addition & 1 deletion src/transformers/integrations/tensor_parallel.py
@@ -219,7 +219,7 @@ def __init__(

     @staticmethod
     def _prepare_input_fn(input_layouts, desired_input_layouts, mod, inputs, device_mesh):
-        if isinstance(inputs[0], DTensor):
+        if inputs and isinstance(inputs[0], DTensor):
             inputs = inputs[0].to_local()
         return inputs
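The guard matters because the hook can receive an empty `inputs` tuple, in which case `inputs[0]` raises an IndexError before the `isinstance` check ever runs. A minimal standalone sketch of the failure mode and the fix (`DTensorStub` is a hypothetical stand-in for `torch.distributed.tensor.DTensor`, not the real class):

```python
# Standalone sketch of the bug the hunk above fixes. DTensorStub is a
# hypothetical stand-in for torch.distributed.tensor.DTensor.
class DTensorStub:
    def to_local(self):
        return "local tensor"

def prepare_input_fn(inputs):
    # `inputs and ...` short-circuits on an empty tuple, so inputs[0]
    # is never evaluated and no IndexError can be raised.
    if inputs and isinstance(inputs[0], DTensorStub):
        inputs = inputs[0].to_local()
    return inputs

print(prepare_input_fn(()))                # () -- empty inputs pass through
print(prepare_input_fn((DTensorStub(),)))  # "local tensor"
```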
6 changes: 5 additions & 1 deletion src/transformers/quantizers/base.py
@@ -342,6 +342,10 @@ def forward(

 MODULES_TO_PATCH_FOR_QUANTIZATION = {
     "Llama4TextExperts": {
         "module_name": SequentialLlama4TextExperts,
-        "quantization_methods": [QuantizationMethod.COMPRESSED_TENSORS, QuantizationMethod.BITS_AND_BYTES],
+        "quantization_methods": [
+            QuantizationMethod.COMPRESSED_TENSORS,
+            QuantizationMethod.BITS_AND_BYTES,
+            QuantizationMethod.FBGEMM_FP8,
+        ],
     }
 }
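This table maps a module class name to a replacement implementation plus the quantization backends for which the swap applies; the hunk simply opts FBGEMM FP8 into the same `SequentialLlama4TextExperts` patch already used by compressed-tensors and bitsandbytes. A hedged sketch of how such a table could be consulted (the `should_patch` helper and the string method names are illustrative assumptions, not the transformers API):

```python
# Illustrative only: a lookup against a patch table shaped like
# MODULES_TO_PATCH_FOR_QUANTIZATION. The helper is an assumption,
# not actual transformers code.
def should_patch(module_cls_name, quant_method, patch_table):
    entry = patch_table.get(module_cls_name)
    # Patch only when the active quantization backend is listed for the module.
    return entry is not None and quant_method in entry["quantization_methods"]

patch_table = {
    "Llama4TextExperts": {
        "module_name": "SequentialLlama4TextExperts",  # replacement class (name only here)
        "quantization_methods": ["compressed-tensors", "bitsandbytes", "fbgemm_fp8"],
    }
}

print(should_patch("Llama4TextExperts", "fbgemm_fp8", patch_table))  # True
print(should_patch("Llama4TextExperts", "gptq", patch_table))        # False
```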
31 changes: 31 additions & 0 deletions src/transformers/quantizers/quantizer_fbgemm_fp8.py
@@ -241,6 +241,37 @@ def update_missing_keys(self, model, missing_keys: List[str], prefix: str) -> List[str]:

             not_missing_keys.append(missing)
         return [k for k in missing_keys if k not in not_missing_keys]

+    def update_tp_plan(self, config):
+        additional_text_plan = {
+            "layers.*.self_attn.q_proj.weight": "local_colwise",
+            "layers.*.self_attn.q_proj.weight_scale": "local_colwise",
+            "layers.*.self_attn.k_proj.weight": "local_colwise",
+            "layers.*.self_attn.k_proj.weight_scale": "local_colwise",
+            "layers.*.self_attn.v_proj.weight": "local_colwise",
+            "layers.*.self_attn.v_proj.weight_scale": "local_colwise",
+            "layers.*.self_attn.o_proj.weight": "local_rowwise",
+            "layers.*.self_attn": "gather",
+            "layers.*.input_layernorm.weight": "sequence_parallel",
+            "layers.*.post_attention_layernorm.weight": "sequence_parallel",
+            "norm.weight": "sequence_parallel",
+            "layers.*.feed_forward.shared_expert.gate_proj.weight": "local_colwise",
+            "layers.*.feed_forward.shared_expert.gate_proj.weight_scale": "local_colwise",
+            "layers.*.feed_forward.shared_expert.up_proj.weight": "local_colwise",
+            "layers.*.feed_forward.shared_expert.up_proj.weight_scale": "local_colwise",
+            "layers.*.feed_forward.shared_expert.down_proj.weight": "local_rowwise",
+            "layers.*.feed_forward.experts": "local",
+            "layers.*.feed_forward": "gather",
+            "layers.*.feed_forward.experts.*.gate_proj.weight": "local_colwise",
+            "layers.*.feed_forward.experts.*.gate_proj.weight_scale": "local_colwise",
+            "layers.*.feed_forward.experts.*.up_proj.weight": "local_colwise",
+            "layers.*.feed_forward.experts.*.up_proj.weight_scale": "local_colwise",
Member: please add a comment on why you decided to update the tp plan for these layers

Contributor Author: will do in a follow up pr
"layers.*.feed_forward.experts.*.down_proj.weight": "local_rowwise",
}
if config.get_text_config() is not None and config.get_text_config().base_model_tp_plan is not None:
config.get_text_config().base_model_tp_plan = additional_text_plan

return config

     def is_serializable(self, safe_serialization=None):
         return True

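Each key in the plan is a glob-style pattern over parameter names and each value names a sharding strategy (`local_colwise`, `local_rowwise`, `sequence_parallel`, `gather`, `local`). Note that every sharded FP8 weight has its `weight_scale` sharded the same way, presumably so each shard keeps the scale entries belonging to its own weight columns. A minimal sketch of matching a parameter name against such a plan (`resolve_strategy` is an illustrative assumption, not the transformers resolver):

```python
import fnmatch

# Illustrative assumption: transformers' actual plan resolution may differ.
# Each "*" in a plan key stands for a variable name segment such as the
# layer or expert index.
def resolve_strategy(param_name, tp_plan):
    for pattern, strategy in tp_plan.items():
        if fnmatch.fnmatchcase(param_name, pattern):
            return strategy
    return None  # parameter is not covered by the plan

plan = {
    "layers.*.self_attn.q_proj.weight": "local_colwise",
    "layers.*.self_attn.q_proj.weight_scale": "local_colwise",
    "layers.*.self_attn.o_proj.weight": "local_rowwise",
}

print(resolve_strategy("layers.0.self_attn.q_proj.weight", plan))        # local_colwise
print(resolve_strategy("layers.0.self_attn.q_proj.weight_scale", plan))  # local_colwise
print(resolve_strategy("layers.0.self_attn.o_proj.weight", plan))        # local_rowwise
```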