Skip to content

Commit

Permalink
Updated transformers doc notebooks with commit 20273ee214d253eaa133ac…
Browse files Browse the repository at this point in the history
…3f146cf778d843ace4

See: huggingface/transformers@20273ee
  • Loading branch information
HuggingFaceDocBuilder committed Jun 19, 2023
1 parent 681d492 commit 2389656
Show file tree
Hide file tree
Showing 3 changed files with 4 additions and 4 deletions.
4 changes: 2 additions & 2 deletions transformers_doc/en/masked_language_modeling.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -742,7 +742,7 @@
"source": [
"from transformers import AutoTokenizer\n",
"\n",
"tokenizer = AutoTokenizer.from_pretrained(\"my_awesome_eli5_mlm_model\")\n",
"tokenizer = AutoTokenizer.from_pretrained(\"stevhliu/my_awesome_eli5_mlm_model\")\n",
"inputs = tokenizer(text, return_tensors=\"pt\")\n",
"mask_token_index = torch.where(inputs[\"input_ids\"] == tokenizer.mask_token_id)[1]"
]
Expand Down Expand Up @@ -814,7 +814,7 @@
"source": [
"from transformers import AutoTokenizer\n",
"\n",
"tokenizer = AutoTokenizer.from_pretrained(\"my_awesome_eli5_mlm_model\")\n",
"tokenizer = AutoTokenizer.from_pretrained(\"stevhliu/my_awesome_eli5_mlm_model\")\n",
"inputs = tokenizer(text, return_tensors=\"tf\")\n",
"mask_token_index = tf.where(inputs[\"input_ids\"] == tokenizer.mask_token_id)[0, 1]"
]
Expand Down
2 changes: 1 addition & 1 deletion transformers_doc/en/pytorch/masked_language_modeling.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -598,7 +598,7 @@
"source": [
"from transformers import AutoTokenizer\n",
"\n",
"tokenizer = AutoTokenizer.from_pretrained(\"my_awesome_eli5_mlm_model\")\n",
"tokenizer = AutoTokenizer.from_pretrained(\"stevhliu/my_awesome_eli5_mlm_model\")\n",
"inputs = tokenizer(text, return_tensors=\"pt\")\n",
"mask_token_index = torch.where(inputs[\"input_ids\"] == tokenizer.mask_token_id)[1]"
]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -616,7 +616,7 @@
"source": [
"from transformers import AutoTokenizer\n",
"\n",
"tokenizer = AutoTokenizer.from_pretrained(\"my_awesome_eli5_mlm_model\")\n",
"tokenizer = AutoTokenizer.from_pretrained(\"stevhliu/my_awesome_eli5_mlm_model\")\n",
"inputs = tokenizer(text, return_tensors=\"tf\")\n",
"mask_token_index = tf.where(inputs[\"input_ids\"] == tokenizer.mask_token_id)[0, 1]"
]
Expand Down

0 comments on commit 2389656

Please sign in to comment.