Use random_attention_mask for TF tests (#16517)
* use random_attention_mask for TF tests

* Fix for TFCLIP test (for now).

Co-authored-by: ydshieh <[email protected]>
ydshieh authored Apr 1, 2022
1 parent 823dbf8 commit 2199382
Showing 29 changed files with 62 additions and 57 deletions.
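For context: both helpers live in tests/test_modeling_tf_common.py. The old pattern, ids_tensor([...], vocab_size=2), draws each mask entry uniformly from {0, 1}, so a row can come out all zeros and leave an example with no attended tokens. Below is a minimal sketch of the replacement, assuming random_attention_mask builds on ids_tensor and pins the last position to 1; the in-repo implementation may differ in details such as rng handling.

import tensorflow as tf


def ids_tensor(shape, vocab_size, dtype=tf.int32):
    # random integer ids in [0, vocab_size); with vocab_size=2 this doubles as
    # the old-style random 0/1 attention mask being replaced in this commit
    ids = tf.random.uniform(shape, minval=0, maxval=vocab_size, dtype=tf.int32)
    return tf.cast(ids, dtype)


def random_attention_mask(shape, dtype=tf.int32):
    # start from a random 0/1 mask ...
    attn_mask = ids_tensor(shape, vocab_size=2, dtype=dtype)
    # ... then pin the last position to 1 so every row attends to at least one
    # token; an all-zero row makes the attention softmax degenerate
    return tf.concat([attn_mask[:, :-1], tf.ones_like(attn_mask[:, -1:])], axis=-1)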
@@ -21,7 +21,7 @@
 from transformers.testing_utils import require_tf, slow

 from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
+from ..test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


 if is_tf_available():
@@ -92,7 +92,7 @@ def prepare_config_and_inputs(self):

         input_mask = None
         if self.use_input_mask:
-            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
+            input_mask = random_attention_mask([self.batch_size, self.seq_length])

         token_type_ids = None
         if self.use_token_type_ids:
4 changes: 2 additions & 2 deletions tests/albert/test_modeling_tf_albert.py
@@ -21,7 +21,7 @@
 from transformers.testing_utils import require_tf, slow

 from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor
+from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask


 if is_tf_available():
@@ -96,7 +96,7 @@ def prepare_config_and_inputs(self):

         input_mask = None
         if self.use_input_mask:
-            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
+            input_mask = random_attention_mask([self.batch_size, self.seq_length])

         token_type_ids = None
         if self.use_token_type_ids:
4 changes: 2 additions & 2 deletions tests/bert/test_modeling_tf_bert.py
@@ -21,7 +21,7 @@
 from transformers.testing_utils import require_tf, slow

 from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
+from ..test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
 from ..utils.test_modeling_tf_core import TFCoreModelTesterMixin


@@ -96,7 +96,7 @@ def prepare_config_and_inputs(self):

         input_mask = None
         if self.use_input_mask:
-            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
+            input_mask = random_attention_mask([self.batch_size, self.seq_length])

         token_type_ids = None
         if self.use_token_type_ids:
6 changes: 6 additions & 0 deletions tests/clip/test_modeling_tf_clip.py
@@ -301,6 +301,12 @@ def prepare_config_and_inputs(self):
         input_mask = None
         if self.use_input_mask:
             input_mask = random_attention_mask([self.batch_size, self.seq_length])
+            # make sure the first token has attention mask `1` to ensure that, after combining the causal mask, there
+            # is still at least one token being attended to for each batch.
+            # TODO: Change `random_attention_mask` in PT/TF/Flax common test file, after a discussion with the team.
+            input_mask = tf.concat(
+                [tf.ones_like(input_mask[:, :1], dtype=input_mask.dtype), input_mask[:, 1:]], axis=-1
+            )

         config = self.get_config()
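The CLIP special case in the commit description comes from the causal mask in the text encoder: under a causal mask the first query position can only ever attend to the first key, so if the random padding mask zeroes out position 0, the combined mask leaves row 0 with nothing to attend to. A small, self-contained illustration of that failure mode follows; the mask values and the 1 = attend / 0 = blocked convention are chosen for the example, and the model's own masking code differs.

import tensorflow as tf

seq_length = 4
# a random mask that happens to zero out the first token -- the bad case
input_mask = tf.constant([[0.0, 1.0, 1.0, 1.0]])  # (batch=1, seq)

# causal mask: query i may attend to keys <= i (lower triangle, incl. diagonal)
causal = tf.linalg.band_part(tf.ones((seq_length, seq_length)), -1, 0)

# combine with the padding mask; 1 = attend, 0 = blocked
combined = causal * input_mask[:, tf.newaxis, :]  # (batch, query, key)

# query 0 may only see key 0 (causal), but key 0 is padded out, so the whole
# row is blocked -- exactly the case the added lines above rule out
print(combined[0, 0].numpy())  # [0. 0. 0. 0.]

Pinning mask position 0 to 1, as the added lines do, guarantees the first row keeps at least its own token.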
4 changes: 2 additions & 2 deletions tests/convbert/test_modeling_tf_convbert.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_tf, slow

 from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor
+from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask


 if is_tf_available():
@@ -94,7 +94,7 @@ def prepare_config_and_inputs(self):

         input_mask = None
         if self.use_input_mask:
-            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
+            input_mask = random_attention_mask([self.batch_size, self.seq_length])

         token_type_ids = None
         if self.use_token_type_ids:
4 changes: 2 additions & 2 deletions tests/ctrl/test_modeling_tf_ctrl.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_tf, slow

 from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor
+from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask


 if is_tf_available():
@@ -69,7 +69,7 @@ def prepare_config_and_inputs(self):

         input_mask = None
         if self.use_input_mask:
-            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
+            input_mask = random_attention_mask([self.batch_size, self.seq_length])

         token_type_ids = None
         if self.use_token_type_ids:
4 changes: 2 additions & 2 deletions tests/deberta/test_modeling_tf_deberta.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_tf, slow

 from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor
+from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask


 if is_tf_available():
@@ -92,7 +92,7 @@ def prepare_config_and_inputs(self):

         input_mask = None
         if self.use_input_mask:
-            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
+            input_mask = random_attention_mask([self.batch_size, self.seq_length])

         token_type_ids = None
         if self.use_token_type_ids:
4 changes: 2 additions & 2 deletions tests/deberta_v2/test_modeling_tf_deberta_v2.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_tf, slow

 from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor
+from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask


 if is_tf_available():
@@ -95,7 +95,7 @@ def prepare_config_and_inputs(self):

         input_mask = None
         if self.use_input_mask:
-            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
+            input_mask = random_attention_mask([self.batch_size, self.seq_length])

         token_type_ids = None
         if self.use_token_type_ids:
4 changes: 2 additions & 2 deletions tests/distilbert/test_modeling_tf_distilbert.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_tf, slow

 from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor
+from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask


 if is_tf_available():
@@ -70,7 +70,7 @@ def prepare_config_and_inputs(self):

         input_mask = None
         if self.use_input_mask:
-            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
+            input_mask = random_attention_mask([self.batch_size, self.seq_length])

         sequence_labels = None
         token_labels = None
7 changes: 3 additions & 4 deletions tests/dpr/test_modeling_tf_dpr.py
@@ -19,7 +19,7 @@
 from transformers.testing_utils import require_tf, slow

 from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor
+from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask


 if is_tf_available():
@@ -94,9 +94,8 @@ def prepare_config_and_inputs(self):

         input_mask = None
         if self.use_input_mask:
-            input_mask = ids_tensor(
-                [self.batch_size, self.seq_length], vocab_size=2
-            )  # follow test_modeling_tf_ctrl.py
+            # follow test_modeling_tf_ctrl.py
+            input_mask = random_attention_mask([self.batch_size, self.seq_length])

         token_type_ids = None
         if self.use_token_type_ids:
4 changes: 2 additions & 2 deletions tests/electra/test_modeling_tf_electra.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_tf, slow

 from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
+from ..test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


 if is_tf_available():
@@ -71,7 +71,7 @@ def prepare_config_and_inputs(self):

         input_mask = None
         if self.use_input_mask:
-            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
+            input_mask = random_attention_mask([self.batch_size, self.seq_length])

         token_type_ids = None
         if self.use_token_type_ids:
4 changes: 2 additions & 2 deletions tests/flaubert/test_modeling_tf_flaubert.py
@@ -19,7 +19,7 @@
 from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow

 from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor
+from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask


 if is_tf_available():
@@ -75,7 +75,7 @@ def __init__(

     def prepare_config_and_inputs(self):
         input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
-        input_mask = ids_tensor([self.batch_size, self.seq_length], 2, dtype=tf.float32)
+        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

         input_lengths = None
         if self.use_input_lengths:
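Flaubert's tester is the one in this diff that builds its mask as floats, hence the forwarded dtype keyword. Under the sketch near the top of this page (shapes here are arbitrary, for illustration only):

# hypothetical shapes; the tester passes its own batch_size and seq_length
input_mask = random_attention_mask([2, 7], dtype=tf.float32)
print(input_mask.dtype)   # float32
print(input_mask[:, -1])  # last column pinned to 1.0 in the sketch above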
4 changes: 2 additions & 2 deletions tests/funnel/test_modeling_tf_funnel.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_tf

 from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor
+from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask


 if is_tf_available():
@@ -111,7 +111,7 @@ def prepare_config_and_inputs(self):

         input_mask = None
         if self.use_input_mask:
-            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
+            input_mask = random_attention_mask([self.batch_size, self.seq_length])

         token_type_ids = None
         if self.use_token_type_ids:
4 changes: 2 additions & 2 deletions tests/gpt2/test_modeling_tf_gpt2.py
@@ -19,7 +19,7 @@
 from transformers.testing_utils import require_tf, slow

 from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
+from ..test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
 from ..utils.test_modeling_tf_core import TFCoreModelTesterMixin


@@ -74,7 +74,7 @@ def prepare_config_and_inputs(self):

         input_mask = None
         if self.use_input_mask:
-            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
+            input_mask = random_attention_mask([self.batch_size, self.seq_length])

         token_type_ids = None
         if self.use_token_type_ids:
4 changes: 2 additions & 2 deletions tests/gptj/test_modeling_tf_gptj.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_tf, slow, tooslow

 from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor
+from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
 from ..utils.test_modeling_tf_core import TFCoreModelTesterMixin


@@ -70,7 +70,7 @@ def prepare_config_and_inputs(self):

         input_mask = None
         if self.use_input_mask:
-            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
+            input_mask = random_attention_mask([self.batch_size, self.seq_length])

         token_type_ids = None
         if self.use_token_type_ids:
4 changes: 2 additions & 2 deletions tests/layoutlm/test_modeling_tf_layoutlm.py
@@ -21,7 +21,7 @@
 from transformers.testing_utils import require_tf, slow

 from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor
+from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask


 if is_tf_available():
@@ -107,7 +107,7 @@ def prepare_config_and_inputs(self):

         input_mask = None
         if self.use_input_mask:
-            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
+            input_mask = random_attention_mask([self.batch_size, self.seq_length])

         token_type_ids = None
         if self.use_token_type_ids:
4 changes: 2 additions & 2 deletions tests/longformer/test_modeling_tf_longformer.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow

 from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor
+from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask


 if is_tf_available():
@@ -79,7 +79,7 @@ def prepare_config_and_inputs(self):

         input_mask = None
         if self.use_input_mask:
-            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
+            input_mask = random_attention_mask([self.batch_size, self.seq_length])

         token_type_ids = None
         if self.use_token_type_ids:
4 changes: 2 additions & 2 deletions tests/lxmert/test_modeling_tf_lxmert.py
@@ -23,7 +23,7 @@
 from transformers.testing_utils import require_tf, slow

 from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor
+from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask


 if is_tf_available():
@@ -124,7 +124,7 @@ def prepare_config_and_inputs(self):

         input_mask = None
         if self.use_lang_mask:
-            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
+            input_mask = random_attention_mask([self.batch_size, self.seq_length])
         token_type_ids = None
         if self.use_token_type_ids:
             token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
4 changes: 2 additions & 2 deletions tests/mobilebert/test_modeling_tf_mobilebert.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_tf, slow

 from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor
+from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask


 if is_tf_available():
@@ -114,7 +114,7 @@ def prepare_config_and_inputs(self):

         input_mask = None
         if self.use_input_mask:
-            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
+            input_mask = random_attention_mask([self.batch_size, self.seq_length])

         token_type_ids = None
         if self.use_token_type_ids:
4 changes: 2 additions & 2 deletions tests/mpnet/test_modeling_tf_mpnet.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_tf, slow

 from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor
+from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask


 if is_tf_available():
@@ -90,7 +90,7 @@ def prepare_config_and_inputs(self):

         input_mask = None
         if self.use_input_mask:
-            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
+            input_mask = random_attention_mask([self.batch_size, self.seq_length])

         sequence_labels = None
         token_labels = None
4 changes: 2 additions & 2 deletions tests/openai/test_modeling_tf_openai.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_tf, slow

 from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor
+from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask


 if is_tf_available():
@@ -70,7 +70,7 @@ def prepare_config_and_inputs(self):

         input_mask = None
         if self.use_input_mask:
-            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
+            input_mask = random_attention_mask([self.batch_size, self.seq_length])

         token_type_ids = None
         if self.use_token_type_ids:
4 changes: 2 additions & 2 deletions tests/rembert/test_modeling_tf_rembert.py
@@ -20,7 +20,7 @@
 from transformers.testing_utils import require_tf, slow

 from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
+from ..test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


 if is_tf_available():
@@ -95,7 +95,7 @@ def prepare_config_and_inputs(self):

         input_mask = None
         if self.use_input_mask:
-            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
+            input_mask = random_attention_mask([self.batch_size, self.seq_length])

         token_type_ids = None
         if self.use_token_type_ids: