```diff
@@ -1976,11 +1976,6 @@ class EdgeTamVideoModel(EdgeTamVideoPreTrainedModel):
     input_modalities = ("video", "text")
     _can_record_outputs = {"mask_decoder_attentions": OutputRecorder(EdgeTamVideoTwoWayAttentionBlock, index=2)}
     _keys_to_ignore_on_load_unexpected = []
-    _tied_weights_keys = {
-        "prompt_encoder.shared_embedding.positional_embedding": "shared_image_embedding.positional_embedding"
-    }
-    # need to be ignored, as it's a buffer and will not be correctly detected as tied weight
-    _keys_to_ignore_on_load_missing = ["prompt_encoder.shared_embedding.positional_embedding"]
```
Comment on lines -1979 to -1983

**Member:** Is it really not needed at all anymore? We could otherwise very easily allow buffers to be tied (but a bit weird).

**Member Author:** Everything seems to work just fine without it 🤷, no warnings either.

**Member:** Well, for sure you won't have warnings about tied weights if we never tie them, haha - did you make sure that both weights correctly live inside ALL the main checkpoints? If so, then yes, they were probably never supposed to be tied.

**Member Author:** Yes, all is working fine; the tying was also removed for sam/sam2 - we just forgot about this one, I guess.


```diff
 
     def __init__(self, config: EdgeTamVideoConfig):
         super().__init__(config)
```
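For readers skimming the thread: the deleted mapping pointed at a registered buffer, and buffers are not `nn.Parameter`s, so parameter-based tying machinery never picks them up. As long as both copies of the buffer are stored in the checkpoint, each one is restored independently and no tying is needed at load time. A minimal, self-contained sketch of that distinction (the toy module names only mirror the ones in the diff; this is not the actual model code):

```python
import torch
from torch import nn


class SharedEmbedding(nn.Module):
    def __init__(self):
        super().__init__()
        # A buffer, not an nn.Parameter: it is saved in the state dict but is
        # invisible to parameter-based weight-tying logic.
        self.register_buffer("positional_embedding", torch.randn(2, 8))


class ToyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.shared_image_embedding = SharedEmbedding()
        self.prompt_encoder = nn.Module()
        self.prompt_encoder.shared_embedding = SharedEmbedding()


model = ToyModel()
# Both copies appear in the state dict, so a checkpoint that stores both keys
# restores each one independently - no tying required at load time.
print(sorted(model.state_dict()))
# ['prompt_encoder.shared_embedding.positional_embedding',
#  'shared_image_embedding.positional_embedding']
print(list(model.named_parameters()))  # [] - buffers are not parameters
```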
```diff
@@ -1040,11 +1040,6 @@ class EdgeTamVideoSegmentationOutput(Sam2VideoSegmentationOutput):
 
 @auto_docstring
 class EdgeTamVideoModel(Sam2VideoModel):
-    _tied_weights_keys = {
-        "prompt_encoder.shared_embedding.positional_embedding": "shared_image_embedding.positional_embedding"
-    }
-    # need to be ignored, as it's a buffer and will not be correctly detected as tied weight
-    _keys_to_ignore_on_load_missing = ["prompt_encoder.shared_embedding.positional_embedding"]
     _keys_to_ignore_on_load_unexpected = []
     _can_record_outputs = {"mask_decoder_attentions": OutputRecorder(EdgeTamVideoTwoWayAttentionBlock, index=2)}
 
```
**src/transformers/models/sam2_video/modeling_sam2_video.py** (0 additions & 5 deletions)

```diff
@@ -1559,11 +1559,6 @@ class Sam2VideoModel(Sam2VideoPreTrainedModel):
     input_modalities = ("video", "text")
     _can_record_outputs = {"mask_decoder_attentions": OutputRecorder(Sam2VideoTwoWayAttentionBlock, index=2)}
     _keys_to_ignore_on_load_unexpected = []
-    _tied_weights_keys = {
-        "prompt_encoder.shared_embedding.positional_embedding": "shared_image_embedding.positional_embedding"
-    }
-    # need to be ignored, as it's a buffer and will not be correctly detected as tied weight
-    _keys_to_ignore_on_load_missing = ["prompt_encoder.shared_embedding.positional_embedding"]
 
     def __init__(self, config: Sam2VideoConfig):
         super().__init__(config)
```
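The reviewer's question - do both keys actually live in all the main checkpoints? - can be answered mechanically by listing the tensor names stored in each published checkpoint. A hedged sketch of one way to do it; the repo id and filename below are placeholders for illustration, not a confirmed checkpoint layout:

```python
from huggingface_hub import hf_hub_download
from safetensors import safe_open

# Placeholder repo id and filename - substitute each real SAM2/EdgeTAM checkpoint.
path = hf_hub_download("facebook/sam2.1-hiera-tiny", "model.safetensors")
with safe_open(path, framework="pt") as f:
    keys = set(f.keys())

for key in (
    "shared_image_embedding.positional_embedding",
    "prompt_encoder.shared_embedding.positional_embedding",
):
    print(f"{key}: {'present' if key in keys else 'MISSING'}")
```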
**src/transformers/models/sam2_video/modular_sam2_video.py** (0 additions & 5 deletions)

```diff
@@ -1446,11 +1446,6 @@ def get_1d_sine_pe(pos_inds, dim, temperature=10000):
 @auto_docstring
 class Sam2VideoModel(Sam2Model):
     input_modalities = ("video", "text")
-    _tied_weights_keys = {
-        "prompt_encoder.shared_embedding.positional_embedding": "shared_image_embedding.positional_embedding"
-    }
-    # need to be ignored, as it's a buffer and will not be correctly detected as tied weight
-    _keys_to_ignore_on_load_missing = ["prompt_encoder.shared_embedding.positional_embedding"]
     _keys_to_ignore_on_load_unexpected = []
     _can_record_outputs = {"mask_decoder_attentions": OutputRecorder(Sam2VideoTwoWayAttentionBlock, index=2)}
 
```
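For context on what the deleted `_keys_to_ignore_on_load_missing` suppressed: when a state dict lacks a key the model expects, non-strict loading reports it as missing, and `from_pretrained` warns about such keys unless they match an ignore pattern. A torch-level sketch of the underlying mechanism (not the transformers loading code itself):

```python
import torch
from torch import nn

model = nn.Linear(4, 4)
checkpoint = {"weight": torch.randn(4, 4)}  # "bias" deliberately absent

# strict=False returns the discrepancies instead of raising.
result = model.load_state_dict(checkpoint, strict=False)
print(result.missing_keys)     # ['bias']
print(result.unexpected_keys)  # []
# transformers compares these lists against _keys_to_ignore_on_load_missing /
# _keys_to_ignore_on_load_unexpected before deciding what to warn about.
```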
```diff
@@ -1567,8 +1567,6 @@ class Sam3TrackerVideoModel(Sam3TrackerVideoPreTrainedModel):
     input_modalities = ("video", "text")
     _can_record_outputs = {"mask_decoder_attentions": OutputRecorder(Sam3TrackerVideoTwoWayAttentionBlock, index=2)}
     _keys_to_ignore_on_load_unexpected = [r"^detector_model."]
-    _tied_weights_keys = {}
-    _keys_to_ignore_on_load_missing = []
     _checkpoint_conversion_mapping = {
         r"tracker_model.(.+)": r"\1",  # the regex allows to remove the prefix, and add it back in revert mode
         "detector_model.vision_encoder.backbone.": "vision_encoder.backbone.",
```
```diff
@@ -461,8 +461,6 @@ class Sam3TrackerVideoModel(Sam2VideoModel):
         "tracker_neck.": "vision_encoder.neck.",
     }
     _keys_to_ignore_on_load_unexpected = [r"^detector_model."]
-    _tied_weights_keys = {}
-    _keys_to_ignore_on_load_missing = []
 
     def __init__(self, config: Sam3TrackerVideoConfig, remove_vision_encoder: bool = False):
         r"""
```
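As an aside on the surviving `_checkpoint_conversion_mapping` attribute visible in both hunks above: each entry maps a pattern over checkpoint key names, so `r"tracker_model.(.+)"` -> `r"\1"` strips the `tracker_model.` prefix (and, per the inline comment, can add it back in revert mode). A minimal sketch of the idea using plain `re.sub`; the actual transformers conversion logic may differ:

```python
import re

# Same shape as the mapping in the diff above, trimmed to two entries.
mapping = {
    r"tracker_model.(.+)": r"\1",
    "detector_model.vision_encoder.backbone.": "vision_encoder.backbone.",
}


def convert_key(key: str) -> str:
    # Apply each pattern in order; entries without groups act as prefix
    # rewrites (note the unescaped dots match any character here).
    for pattern, replacement in mapping.items():
        key = re.sub(pattern, replacement, key)
    return key


print(convert_key("tracker_model.memory_encoder.proj.weight"))
# -> memory_encoder.proj.weight
print(convert_key("detector_model.vision_encoder.backbone.stem.weight"))
# -> vision_encoder.backbone.stem.weight
```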