supports_tp_plan
kwen2501 committed Nov 16, 2024
1 parent 93ba283 commit 73524c9
Showing 1 changed file with 2 additions and 2 deletions.
src/transformers/modeling_utils.py: 2 additions & 2 deletions
@@ -4371,7 +4371,7 @@ def from_pretrained(
 
         if tp_plan is not None:
             assert tp_device is not None, "tp_device not set!"
-            if not model.has_tp_plan:
+            if not model.supports_tp_plan:
                 raise NotImplementedError("This model does not have a tensor parallel plan.")
             # Assuming sharding the model onto the world
             world_size = torch.distributed.get_world_size()
@@ -5069,7 +5069,7 @@ def _is_quantized_training_enabled(self):
         return self.hf_quantizer.is_trainable
 
     @property
-    def has_tp_plan(self):
+    def supports_tp_plan(self):
         """
         Returns whether the model has a tensor parallelism plan.
        """
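For context, a minimal caller-side sketch of the path this rename guards: loading a model with a tensor parallel plan so that from_pretrained hits the supports_tp_plan check. This is not part of the commit; it assumes a transformers version with tp_plan support, a multi-GPU host launched via torchrun --nproc-per-node 4 demo.py, and an illustrative checkpoint id.

import os

import torch
from transformers import AutoModelForCausalLM

# Illustrative checkpoint, not taken from the commit.
model_id = "meta-llama/Meta-Llama-3-8B-Instruct"

# Initialize the process group; from_pretrained shards the model across
# this world when a tp_plan is requested. device_id needs PyTorch >= 2.3.
rank = int(os.environ["RANK"])
torch.distributed.init_process_group("nccl", device_id=torch.device(f"cuda:{rank}"))

# A model without a tensor parallel plan reports supports_tp_plan == False,
# and the renamed check in from_pretrained raises NotImplementedError for it.
model = AutoModelForCausalLM.from_pretrained(model_id, tp_plan="auto")
print(f"supports_tp_plan={model.supports_tp_plan}, world_size={torch.distributed.get_world_size()}")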
