We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
PEFT
1 parent 80134e6 · commit 5c6d6be (Copy full SHA for 5c6d6be)
tests/utils/test_modeling_utils.py
@@ -1818,6 +1818,8 @@ def test_cache_when_needed_at_train_time(self):
         self.assertTrue(model.training)

         # We can also disable the cache to skip a few operations, if the training loop doesn't need cache
+        # NOTE: after #41900, we need to pass the correct attention mask size
+        model_inputs["attention_mask"] = model_inputs["attention_mask"][:, :-num_virtual_tokens]
         model_outputs = model(**model_inputs, use_cache=False)
         self.assertIsNone(model_outputs.past_key_values)
0 commit comments