From 49ab16239c74ccfca2298472868caadb1d2c3878 Mon Sep 17 00:00:00 2001 From: Alara Dirik <8944735+alaradirik@users.noreply.github.com> Date: Mon, 20 Feb 2023 16:37:11 +0300 Subject: [PATCH] Add EfficientNet (#21563) * Add EfficientNet to transformers --- README.md | 1 + README_es.md | 1 + README_hd.md | 1 + README_ja.md | 1 + README_ko.md | 1 + README_zh-hans.md | 1 + README_zh-hant.md | 1 + docs/source/de/index.mdx | 1 + docs/source/en/_toctree.yml | 2 + docs/source/en/index.mdx | 2 + docs/source/en/model_doc/efficientnet.mdx | 47 ++ docs/source/en/serialization.mdx | 1 + docs/source/en/tasks/image_classification.mdx | 2 +- docs/source/es/index.mdx | 1 + docs/source/it/index.mdx | 1 + docs/source/ja/index.mdx | 1 + docs/source/ko/index.mdx | 1 + docs/source/pt/index.mdx | 1 + src/transformers/__init__.py | 18 + src/transformers/models/__init__.py | 1 + .../models/auto/configuration_auto.py | 3 + .../models/auto/image_processing_auto.py | 1 + src/transformers/models/auto/modeling_auto.py | 3 + .../models/efficientnet/__init__.py | 84 +++ .../configuration_efficientnet.py | 169 +++++ .../convert_efficientnet_to_pytorch.py | 339 +++++++++ .../image_processing_efficientnet.py | 347 +++++++++ .../efficientnet/modeling_efficientnet.py | 662 ++++++++++++++++++ src/transformers/utils/dummy_pt_objects.py | 24 + .../utils/dummy_vision_objects.py | 7 + .../models/convnext/test_modeling_convnext.py | 1 - tests/models/efficientnet/__init__.py | 0 .../test_image_processing_efficientnet.py | 195 ++++++ .../test_modeling_efficientnet.py | 257 +++++++ 34 files changed, 2176 insertions(+), 2 deletions(-) create mode 100644 docs/source/en/model_doc/efficientnet.mdx create mode 100644 src/transformers/models/efficientnet/__init__.py create mode 100644 src/transformers/models/efficientnet/configuration_efficientnet.py create mode 100644 src/transformers/models/efficientnet/convert_efficientnet_to_pytorch.py create mode 100644 src/transformers/models/efficientnet/image_processing_efficientnet.py create mode 100644 src/transformers/models/efficientnet/modeling_efficientnet.py create mode 100644 tests/models/efficientnet/__init__.py create mode 100644 tests/models/efficientnet/test_image_processing_efficientnet.py create mode 100644 tests/models/efficientnet/test_modeling_efficientnet.py diff --git a/README.md b/README.md index 54ed577b8eec..d166ca19a31f 100644 --- a/README.md +++ b/README.md @@ -321,6 +321,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[DPR](https://huggingface.co/docs/transformers/model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. 1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun. 1. **[EfficientFormer](https://huggingface.co/docs/transformers/model_doc/efficientformer)** (from Snap Research) released with the paper [EfficientFormer: Vision Transformers at MobileNetSpeed](https://arxiv.org/abs/2206.01191) by Yanyu Li, Geng Yuan, Yang Wen, Ju Hu, Georgios Evangelidis, Sergey Tulyakov, Yanzhi Wang, Jian Ren. +1. 
**[EfficientNet](https://huggingface.co/docs/transformers/main/model_doc/efficientnet)** (from Google Brain) released with the paper [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) by Mingxing Tan, Quoc V. Le. 1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning. 1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. 1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu. diff --git a/README_es.md b/README_es.md index 7ae089cc94dc..2960f648a108 100644 --- a/README_es.md +++ b/README_es.md @@ -314,6 +314,7 @@ Número actual de puntos de control: ![](https://img.shields.io/endpoint?url=htt 1. **[DPR](https://huggingface.co/docs/transformers/model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. 1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun. 1. **[EfficientFormer](https://huggingface.co/docs/transformers/model_doc/efficientformer)** (from Snap Research) released with the paper [EfficientFormer: Vision Transformers at MobileNetSpeed](https://arxiv.org/abs/2206.01191) by Yanyu Li, Geng Yuan, Yang Wen, Ju Hu, Georgios Evangelidis, Sergey Tulyakov, Yanzhi Wang, Jian Ren. +1. **[EfficientNet](https://huggingface.co/docs/transformers/main/model_doc/efficientnet)** (from Google Brain) released with the paper [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) by Mingxing Tan, Quoc V. Le. 1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning. 1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. 1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu. 
diff --git a/README_hd.md b/README_hd.md index 91965713d903..128e2e709843 100644 --- a/README_hd.md +++ b/README_hd.md @@ -286,6 +286,7 @@ conda install -c huggingface transformers 1. **[DPR](https://huggingface.co/docs/transformers/model_doc/dpr)** (फेसबुक से) साथ में पेपर [ओपन-डोमेन क्वेश्चन आंसरिंग के लिए डेंस पैसेज रिट्रीवल](https://arxiv. org/abs/2004.04906) व्लादिमीर करपुखिन, बरलास ओज़ुज़, सेवन मिन, पैट्रिक लुईस, लेडेल वू, सर्गेई एडुनोव, डैनकी चेन, और वेन-ताऊ यिह द्वारा। 1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (इंटेल लैब्स से) साथ में कागज [विज़न ट्रांसफॉर्मर्स फॉर डेंस प्रेडिक्शन](https://arxiv.org /abs/2103.13413) रेने रैनफ्टल, एलेक्सी बोचकोवस्की, व्लादलेन कोल्टन द्वारा। 1. **[EfficientFormer](https://huggingface.co/docs/transformers/model_doc/efficientformer)** (from Snap Research) released with the paper [EfficientFormer: Vision Transformers at MobileNetSpeed](https://arxiv.org/abs/2206.01191) by Yanyu Li, Geng Yuan, Yang Wen, Ju Hu, Georgios Evangelidis, Sergey Tulyakov, Yanzhi Wang, Jian Ren. +1. **[EfficientNet](https://huggingface.co/docs/transformers/main/model_doc/efficientnet)** (from Google Brain) released with the paper [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) by Mingxing Tan, Quoc V. Le. 1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (Google रिसर्च/स्टैनफोर्ड यूनिवर्सिटी से) साथ में दिया गया पेपर [इलेक्ट्रा: जेनरेटर के बजाय भेदभाव करने वाले के रूप में टेक्स्ट एन्कोडर्स का पूर्व-प्रशिक्षण] (https://arxiv.org/abs/2003.10555) केविन क्लार्क, मिन्ह-थांग लुओंग, क्वोक वी. ले, क्रिस्टोफर डी. मैनिंग द्वारा पोस्ट किया गया। 1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (Google रिसर्च से) साथ में दिया गया पेपर [सीक्वेंस जेनरेशन टास्क के लिए प्री-ट्रेंड चेकपॉइंट का इस्तेमाल करना](https:/ /arxiv.org/abs/1907.12461) साशा रोठे, शशि नारायण, अलियाक्सि सेवेरिन द्वारा। 1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)**(Baidu से) साथ देने वाला पेपर [ERNIE: एन्हांस्ड रिप्रेजेंटेशन थ्रू नॉलेज इंटीग्रेशन](https://arxiv.org/abs/1904.09223) यू सन, शुओहुआन वांग, युकुन ली, शिकुन फेंग, ज़ुई चेन, हान झांग, शिन तियान, डैनक्सियांग झू, हाओ तियान, हुआ वू द्वारा पोस्ट किया गया। diff --git a/README_ja.md b/README_ja.md index f71c09c21b9d..b6fe5386f960 100644 --- a/README_ja.md +++ b/README_ja.md @@ -348,6 +348,7 @@ Flax、PyTorch、TensorFlowをcondaでインストールする方法は、それ 1. **[DPR](https://huggingface.co/docs/transformers/model_doc/dpr)** (Facebook から) Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih から公開された研究論文: [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) 1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (Intel Labs から) René Ranftl, Alexey Bochkovskiy, Vladlen Koltun から公開された研究論文: [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) 1. **[EfficientFormer](https://huggingface.co/docs/transformers/model_doc/efficientformer)** (Snap Research から) Yanyu Li, Geng Yuan, Yang Wen, Ju Hu, Georgios Evangelidis, Sergey Tulyakov, Yanzhi Wang, Jian Ren. から公開された研究論文 [EfficientFormer: Vision Transformers at MobileNetSpeed](https://arxiv.org/abs/2206.01191) +1. 
**[EfficientNet](https://huggingface.co/docs/transformers/main/model_doc/efficientnet)** (from Google Brain) released with the paper [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) by Mingxing Tan, Quoc V. Le. 1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (Google Research/Stanford University から) Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning から公開された研究論文: [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) 1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (Google Research から) Sascha Rothe, Shashi Narayan, Aliaksei Severyn から公開された研究論文: [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) 1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (Baidu から) Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu から公開された研究論文: [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) diff --git a/README_ko.md b/README_ko.md index ae11c211815d..559db4f52179 100644 --- a/README_ko.md +++ b/README_ko.md @@ -263,6 +263,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[DPR](https://huggingface.co/docs/transformers/model_doc/dpr)** (Facebook 에서) Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih 의 [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) 논문과 함께 발표했습니다. 1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (Intel Labs 에서) René Ranftl, Alexey Bochkovskiy, Vladlen Koltun 의 [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) 논문과 함께 발표했습니다. 1. **[EfficientFormer](https://huggingface.co/docs/transformers/model_doc/efficientformer)** (from Snap Research) released with the paper [EfficientFormer: Vision Transformers at MobileNetSpeed](https://arxiv.org/abs/2206.01191) by Yanyu Li, Geng Yuan, Yang Wen, Ju Hu, Georgios Evangelidis, Sergey Tulyakov, Yanzhi Wang, Jian Ren. +1. **[EfficientNet](https://huggingface.co/docs/transformers/main/model_doc/efficientnet)** (from Google Brain) released with the paper [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) by Mingxing Tan, Quoc V. Le. 1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (Google Research/Stanford University 에서) Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning 의 [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) 논문과 함께 발표했습니다. 1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (Google Research 에서) Sascha Rothe, Shashi Narayan, Aliaksei Severyn 의 [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) 논문과 함께 발표했습니다. 1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (Baidu 에서) Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu 의 [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) 논문과 함께 발표했습니다. 
diff --git a/README_zh-hans.md b/README_zh-hans.md index 870cf924ecff..df0b21443c5c 100644 --- a/README_zh-hans.md +++ b/README_zh-hans.md @@ -287,6 +287,7 @@ conda install -c huggingface transformers 1. **[DPR](https://huggingface.co/docs/transformers/model_doc/dpr)** (来自 Facebook) 伴随论文 [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) 由 Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih 发布。 1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (来自 Intel Labs) 伴随论文 [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) 由 René Ranftl, Alexey Bochkovskiy, Vladlen Koltun 发布。 1. **[EfficientFormer](https://huggingface.co/docs/transformers/model_doc/efficientformer)** (来自 Snap Research) 伴随论文 [EfficientFormer: Vision Transformers at MobileNetSpeed](https://arxiv.org/abs/2206.01191) 由 Yanyu Li, Geng Yuan, Yang Wen, Ju Hu, Georgios Evangelidis, Sergey Tulyakov, Yanzhi Wang, Jian Ren 发布。 +1. **[EfficientNet](https://huggingface.co/docs/transformers/main/model_doc/efficientnet)** (from Google Brain) released with the paper [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) by Mingxing Tan, Quoc V. Le. 1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (来自 Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning 发布。 1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (来自 Google Research) 伴随论文 [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) 由 Sascha Rothe, Shashi Narayan, Aliaksei Severyn 发布。 1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (来自 Baidu) 伴随论文 [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu 发布。 diff --git a/README_zh-hant.md b/README_zh-hant.md index 71729b779a64..c14e5efa9224 100644 --- a/README_zh-hant.md +++ b/README_zh-hant.md @@ -299,6 +299,7 @@ conda install -c huggingface transformers 1. **[DPR](https://huggingface.co/docs/transformers/model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. 1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun. 1. **[EfficientFormer](https://huggingface.co/docs/transformers/model_doc/efficientformer)** (from Snap Research) released with the paper [EfficientFormer: Vision Transformers at MobileNetSpeed](https://arxiv.org/abs/2206.01191) by Yanyu Li, Geng Yuan, Yang Wen, Ju Hu, Georgios Evangelidis, Sergey Tulyakov, Yanzhi Wang, Jian Ren. +1. 
**[EfficientNet](https://huggingface.co/docs/transformers/main/model_doc/efficientnet)** (from Google Brain) released with the paper [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) by Mingxing Tan, Quoc V. Le. 1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning. 1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. 1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu. diff --git a/docs/source/de/index.mdx b/docs/source/de/index.mdx index ec41ddbc9fb1..f82aa44ea6bd 100644 --- a/docs/source/de/index.mdx +++ b/docs/source/de/index.mdx @@ -86,6 +86,7 @@ Die Bibliothek enthält derzeit JAX-, PyTorch- und TensorFlow-Implementierungen, 1. **[DiT](model_doc/dit)** (from Microsoft Research) released with the paper [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei. 1. **[DPR](model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. 1. **[DPT](master/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun. +1. **[EfficientNet](model_doc/efficientnet)** (from Google Research) released with the paper [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) by Mingxing Tan and Quoc V. Le. 1. **[ELECTRA](model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning. 1. **[EncoderDecoder](model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. 1. **[FlauBERT](model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab. 
diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 9e32cb49426d..264824fc8734 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -442,6 +442,8 @@ title: DPT - local: model_doc/efficientformer title: EfficientFormer + - local: model_doc/efficientnet + title: EfficientNet - local: model_doc/glpn title: GLPN - local: model_doc/imagegpt diff --git a/docs/source/en/index.mdx b/docs/source/en/index.mdx index 18ffd60388a4..42a8d5bad6e9 100644 --- a/docs/source/en/index.mdx +++ b/docs/source/en/index.mdx @@ -100,6 +100,7 @@ The documentation is organized into five sections: 1. **[DPR](model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. 1. **[DPT](master/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun. 1. **[EfficientFormer](model_doc/efficientformer)** (from Snap Research) released with the paper [EfficientFormer: Vision Transformers at MobileNetSpeed](https://arxiv.org/abs/2206.01191) by Yanyu Li, Geng Yuan, Yang Wen, Ju Hu, Georgios Evangelidis, Sergey Tulyakov, Yanzhi Wang, Jian Ren. +1. **[EfficientNet](model_doc/efficientnet)** (from Google Brain) released with the paper [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) by Mingxing Tan, Quoc V. Le. 1. **[ELECTRA](model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning. 1. **[EncoderDecoder](model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. 1. **[ERNIE](model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu. @@ -290,6 +291,7 @@ Flax), PyTorch, and/or TensorFlow. | DPR | ✅ | ✅ | ✅ | ✅ | ❌ | | DPT | ❌ | ❌ | ✅ | ❌ | ❌ | | EfficientFormer | ❌ | ❌ | ✅ | ❌ | ❌ | +| EfficientNet | ❌ | ❌ | ✅ | ❌ | ❌ | | ELECTRA | ✅ | ✅ | ✅ | ✅ | ✅ | | Encoder decoder | ❌ | ❌ | ✅ | ✅ | ✅ | | ERNIE | ❌ | ❌ | ✅ | ❌ | ❌ | diff --git a/docs/source/en/model_doc/efficientnet.mdx b/docs/source/en/model_doc/efficientnet.mdx new file mode 100644 index 000000000000..6aeec7651c8c --- /dev/null +++ b/docs/source/en/model_doc/efficientnet.mdx @@ -0,0 +1,47 @@ + + +# EfficientNet + +## Overview + +The EfficientNet model was proposed in [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) +by Mingxing Tan and Quoc V. Le. EfficientNets are a family of image classification models, which achieve state-of-the-art accuracy while being an order of magnitude smaller and faster than previous models.
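+
+A minimal inference sketch follows. It exercises the classes this patch adds (`EfficientNetImageProcessor`, `EfficientNetForImageClassification`) and assumes the `google/efficientnet-b7` checkpoint named in the configuration's pretrained archive map has been published on the Hub:
+
+```python
+import requests
+import torch
+from PIL import Image
+
+from transformers import EfficientNetForImageClassification, EfficientNetImageProcessor
+
+# the same COCO image the conversion script uses for verification
+url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+image = Image.open(requests.get(url, stream=True).raw)
+
+preprocessor = EfficientNetImageProcessor.from_pretrained("google/efficientnet-b7")
+model = EfficientNetForImageClassification.from_pretrained("google/efficientnet-b7")
+
+inputs = preprocessor(images=image, return_tensors="pt")
+with torch.no_grad():
+    logits = model(**inputs).logits
+
+# the converted checkpoints are ImageNet-1k classifiers, so the head predicts one of 1000 labels
+predicted_label = logits.argmax(-1).item()
+print(model.config.id2label[predicted_label])
+```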
+ +The abstract from the paper is the following: + +*Convolutional Neural Networks (ConvNets) are commonly developed at a fixed resource budget, and then scaled up for better accuracy if more resources are available. In this paper, we systematically study model scaling and identify that carefully balancing network depth, width, and resolution can lead to better performance. Based on this observation, we propose a new scaling method that uniformly scales all dimensions of depth/width/resolution using a simple yet highly effective compound coefficient. We demonstrate the effectiveness of this method on scaling up MobileNets and ResNet. +To go even further, we use neural architecture search to design a new baseline network and scale it up to obtain a family of models, called EfficientNets, which achieve much better accuracy and efficiency than previous ConvNets. In particular, our EfficientNet-B7 achieves state-of-the-art 84.3% top-1 accuracy on ImageNet, while being 8.4x smaller and 6.1x faster on inference than the best existing ConvNet. Our EfficientNets also transfer well and achieve state-of-the-art accuracy on CIFAR-100 (91.7%), Flowers (98.8%), and 3 other transfer learning datasets, with an order of magnitude fewer parameters.* + +This model was contributed by [adirik](https://huggingface.co/adirik). +The original code can be found [here](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet). + + +## EfficientNetConfig + +[[autodoc]] EfficientNetConfig + +## EfficientNetImageProcessor + +[[autodoc]] EfficientNetImageProcessor + - preprocess + +## EfficientNetModel + +[[autodoc]] EfficientNetModel + - forward + +## EfficientNetForImageClassification + +[[autodoc]] EfficientNetForImageClassification + - forward + diff --git a/docs/source/en/serialization.mdx b/docs/source/en/serialization.mdx index d1485fb9cdbe..f57ea59971d2 100644 --- a/docs/source/en/serialization.mdx +++ b/docs/source/en/serialization.mdx @@ -76,6 +76,7 @@ Ready-made configurations include the following architectures: - DeiT - DETR - DistilBERT +- EfficientNet - ELECTRA - ERNIE - FlauBERT diff --git a/docs/source/en/tasks/image_classification.mdx b/docs/source/en/tasks/image_classification.mdx index 43041c45c366..4559b7d1af21 100644 --- a/docs/source/en/tasks/image_classification.mdx +++ b/docs/source/en/tasks/image_classification.mdx @@ -30,7 +30,7 @@ The task illustrated in this tutorial is supported by the following model archit -[BEiT](../model_doc/beit), [BiT](../model_doc/bit), [ConvNeXT](../model_doc/convnext), [CvT](../model_doc/cvt), [Data2VecVision](../model_doc/data2vec-vision), [DeiT](../model_doc/deit), [DiNAT](../model_doc/dinat), [EfficientFormer](../model_doc/efficientformer), [ImageGPT](../model_doc/imagegpt), [LeViT](../model_doc/levit), [MobileNetV1](../model_doc/mobilenet_v1), [MobileNetV2](../model_doc/mobilenet_v2), [MobileViT](../model_doc/mobilevit), [NAT](../model_doc/nat), [Perceiver](../model_doc/perceiver), [PoolFormer](../model_doc/poolformer), [RegNet](../model_doc/regnet), [ResNet](../model_doc/resnet), [SegFormer](../model_doc/segformer), [Swin Transformer](../model_doc/swin), [Swin Transformer V2](../model_doc/swinv2), [VAN](../model_doc/van), [ViT](../model_doc/vit), [ViT Hybrid](../model_doc/vit_hybrid), [ViTMSN](../model_doc/vit_msn) +[BEiT](../model_doc/beit), [BiT](../model_doc/bit), [ConvNeXT](../model_doc/convnext), [CvT](../model_doc/cvt), [Data2VecVision](../model_doc/data2vec-vision), [DeiT](../model_doc/deit), [DiNAT](../model_doc/dinat),
[EfficientFormer](../model_doc/efficientformer), [EfficientNet](../model_doc/efficientnet), [ImageGPT](../model_doc/imagegpt), [LeViT](../model_doc/levit), [MobileNetV1](../model_doc/mobilenet_v1), [MobileNetV2](../model_doc/mobilenet_v2), [MobileViT](../model_doc/mobilevit), [NAT](../model_doc/nat), [Perceiver](../model_doc/perceiver), [PoolFormer](../model_doc/poolformer), [RegNet](../model_doc/regnet), [ResNet](../model_doc/resnet), [SegFormer](../model_doc/segformer), [Swin Transformer](../model_doc/swin), [Swin Transformer V2](../model_doc/swinv2), [VAN](../model_doc/van), [ViT](../model_doc/vit), [ViT Hybrid](../model_doc/vit_hybrid), [ViTMSN](../model_doc/vit_msn) diff --git a/docs/source/es/index.mdx b/docs/source/es/index.mdx index 2de430827581..997b4f97460d 100644 --- a/docs/source/es/index.mdx +++ b/docs/source/es/index.mdx @@ -77,6 +77,7 @@ La biblioteca actualmente contiene implementaciones de JAX, PyTorch y TensorFlow 1. **[DistilBERT](model_doc/distilbert)** (de HuggingFace), publicado junto con el paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) por Victor Sanh, Lysandre Debut y Thomas Wolf. Se ha aplicado el mismo método para comprimir GPT2 en [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), RoBERTa en [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), BERT multilingüe en [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation) y una versión alemana de DistilBERT. 1. **[DPR](model_doc/dpr)** (de Facebook) publicado con el paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) por Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, y Wen-tau Yih. 1. **[DPT](master/model_doc/dpt)** (de Intel Labs) publicado con el paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) por René Ranftl, Alexey Bochkovskiy, Vladlen Koltun. +1. **[EfficientNet](model_doc/efficientnet)** (from Google Research) released with the paper [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) by Mingxing Tan and Quoc V. Le. 1. **[EncoderDecoder](model_doc/encoder-decoder)** (de Google Research) publicado con el paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) por Sascha Rothe, Shashi Narayan, Aliaksei Severyn. 1. **[ELECTRA](model_doc/electra)** (de Google Research/Universidad de Stanford) publicado con el paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) por Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning. 1. **[FlauBERT](model_doc/flaubert)** (de CNRS) publicado con el paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) por Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab. diff --git a/docs/source/it/index.mdx b/docs/source/it/index.mdx index 38ab8f8aa64c..7be478bd7791 100644 --- a/docs/source/it/index.mdx +++ b/docs/source/it/index.mdx @@ -83,6 +83,7 @@ La libreria attualmente contiene implementazioni in JAX, PyTorch e TensorFlow, p 1. 
**[DistilBERT](model_doc/distilbert)** (da HuggingFace), rilasciato assieme al paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) da Victor Sanh, Lysandre Debut e Thomas Wolf. La stessa tecnica è stata applicata per comprimere GPT2 in [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), RoBERTa in [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), Multilingual BERT in [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation) and a German version of DistilBERT. 1. **[DPR](model_doc/dpr)** (da Facebook) rilasciato con il paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) da Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, e Wen-tau Yih. 1. **[DPT](master/model_doc/dpt)** (da Intel Labs) rilasciato con il paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) da René Ranftl, Alexey Bochkovskiy, Vladlen Koltun. +1. **[EfficientNet](model_doc/efficientnet)** (from Google Research) released with the paper [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) by Mingxing Tan and Quoc V. Le. 1. **[EncoderDecoder](model_doc/encoder-decoder)** (da Google Research) rilasciato con il paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) da Sascha Rothe, Shashi Narayan, Aliaksei Severyn. 1. **[ELECTRA](model_doc/electra)** (da Google Research/Stanford University) rilasciato con il paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) da Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning. 1. **[FlauBERT](model_doc/flaubert)** (da CNRS) rilasciato con il paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) da Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab. diff --git a/docs/source/ja/index.mdx b/docs/source/ja/index.mdx index 1183d2ee0ea1..d98db263461b 100644 --- a/docs/source/ja/index.mdx +++ b/docs/source/ja/index.mdx @@ -95,6 +95,7 @@ specific language governing permissions and limitations under the License. 1. **[Donut](https://huggingface.co/docs/transformers/model_doc/donut)** (NAVER から), Geewook Kim, Teakgyu Hong, Moonbin Yim, Jeongyeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, Seunghyun Park から公開された研究論文: [OCR-free Document Understanding Transformer](https://arxiv.org/abs/2111.15664) 1. **[DPR](https://huggingface.co/docs/transformers/model_doc/dpr)** (Facebook から) Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih から公開された研究論文: [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) 1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (Intel Labs から) René Ranftl, Alexey Bochkovskiy, Vladlen Koltun から公開された研究論文: [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) +1. 
**[EfficientNet](https://huggingface.co/docs/transformers/model_doc/efficientnet)** (from Google Research) released with the paper [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) by Mingxing Tan and Quoc V. Le. 1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (Google Research/Stanford University から) Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning から公開された研究論文: [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) 1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (Google Research から) Sascha Rothe, Shashi Narayan, Aliaksei Severyn から公開された研究論文: [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) 1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (Baidu から) Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu から公開された研究論文: [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) diff --git a/docs/source/ko/index.mdx b/docs/source/ko/index.mdx index f3f09057109b..8073c2daeb83 100644 --- a/docs/source/ko/index.mdx +++ b/docs/source/ko/index.mdx @@ -88,6 +88,7 @@ specific language governing permissions and limitations under the License. 1. **[Donut](model_doc/donut)** (from NAVER), released together with the paper [OCR-free Document Understanding Transformer](https://arxiv.org/abs/2111.15664) by Geewook Kim, Teakgyu Hong, Moonbin Yim, Jeongyeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, Seunghyun Park. 1. **[DPR](model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. 1. **[DPT](master/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun. +1. **[EfficientNet](model_doc/efficientnet)** (from Google Research) released with the paper [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) by Mingxing Tan and Quoc V. Le. 1. **[ELECTRA](model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning. 1. **[EncoderDecoder](model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. 1. **[ERNIE](model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu. diff --git a/docs/source/pt/index.mdx b/docs/source/pt/index.mdx index fd21c5f913d7..bdbd7385fdcf 100644 --- a/docs/source/pt/index.mdx +++ b/docs/source/pt/index.mdx @@ -91,6 +91,7 @@ Atualmente a biblioteca contém implementações do PyTorch, TensorFlow e JAX, p 1. 
**[DistilBERT](model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation) and a German version of DistilBERT. 1. **[DPR](model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. 1. **[DPT](master/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun. +1. **[EfficientNet](model_doc/efficientnet)** (from Google Research) released with the paper [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) by Mingxing Tan and Quoc V. Le. 1. **[EncoderDecoder](model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. 1. **[ELECTRA](model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning. 1. **[FlauBERT](model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab. 
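The source-side hunks that follow register EfficientNet with the library's Auto mappings (configuration, image processor, model, and image-classification classes). Once the patch is applied, the model should therefore also be reachable through the generic Auto API; a minimal sketch, again assuming a published `google/efficientnet-b7` checkpoint:

```python
from transformers import AutoConfig, AutoImageProcessor, AutoModelForImageClassification

# "efficientnet" is the model_type key registered in configuration_auto.py below
config = AutoConfig.for_model("efficientnet")
print(config.model_type)  # efficientnet

# these calls resolve to the EfficientNet classes through the auto mappings added below
processor = AutoImageProcessor.from_pretrained("google/efficientnet-b7")
model = AutoModelForImageClassification.from_pretrained("google/efficientnet-b7")
print(type(model).__name__)  # EfficientNetForImageClassification
```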
diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 73d51bb1b4a6..bf9175d757cb 100644 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -266,6 +266,7 @@ ], "models.dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"], "models.efficientformer": ["EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "EfficientFormerConfig"], + "models.efficientnet": ["EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "EfficientNetConfig"], "models.electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraTokenizer"], "models.encoder_decoder": ["EncoderDecoderConfig"], "models.ernie": [ @@ -831,6 +832,7 @@ _import_structure["models.donut"].extend(["DonutFeatureExtractor", "DonutImageProcessor"]) _import_structure["models.dpt"].extend(["DPTFeatureExtractor", "DPTImageProcessor"]) _import_structure["models.efficientformer"].append("EfficientFormerImageProcessor") + _import_structure["models.efficientnet"].append("EfficientNetImageProcessor") _import_structure["models.flava"].extend(["FlavaFeatureExtractor", "FlavaImageProcessor", "FlavaProcessor"]) _import_structure["models.glpn"].extend(["GLPNFeatureExtractor", "GLPNImageProcessor"]) _import_structure["models.imagegpt"].extend(["ImageGPTFeatureExtractor", "ImageGPTImageProcessor"]) @@ -1463,6 +1465,14 @@ "EfficientFormerPreTrainedModel", ] ) + _import_structure["models.efficientnet"].extend( + [ + "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST", + "EfficientNetForImageClassification", + "EfficientNetModel", + "EfficientNetPreTrainedModel", + ] + ) _import_structure["models.electra"].extend( [ "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST", @@ -3833,6 +3843,7 @@ ) from .models.dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig from .models.efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig + from .models.efficientnet import EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientNetConfig from .models.electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraTokenizer from .models.encoder_decoder import EncoderDecoderConfig from .models.ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig @@ -4331,6 +4342,7 @@ from .models.donut import DonutFeatureExtractor, DonutImageProcessor from .models.dpt import DPTFeatureExtractor, DPTImageProcessor from .models.efficientformer import EfficientFormerImageProcessor + from .models.efficientnet import EfficientNetImageProcessor from .models.flava import FlavaFeatureExtractor, FlavaImageProcessor, FlavaProcessor from .models.glpn import GLPNFeatureExtractor, GLPNImageProcessor from .models.imagegpt import ImageGPTFeatureExtractor, ImageGPTImageProcessor @@ -4856,6 +4868,12 @@ EfficientFormerModel, EfficientFormerPreTrainedModel, ) + from .models.efficientnet import ( + EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST, + EfficientNetForImageClassification, + EfficientNetModel, + EfficientNetPreTrainedModel, + ) from .models.electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, diff --git a/src/transformers/models/__init__.py b/src/transformers/models/__init__.py index 7f53d68e2a75..580334dd9bb9 100644 --- a/src/transformers/models/__init__.py +++ b/src/transformers/models/__init__.py @@ -66,6 +66,7 @@ dpr, dpt, efficientformer, + efficientnet, electra, encoder_decoder, ernie, diff --git a/src/transformers/models/auto/configuration_auto.py b/src/transformers/models/auto/configuration_auto.py index bde1bd61804d..061824bf9a66 100755 --- a/src/transformers/models/auto/configuration_auto.py +++ 
b/src/transformers/models/auto/configuration_auto.py @@ -74,6 +74,7 @@ ("dpr", "DPRConfig"), ("dpt", "DPTConfig"), ("efficientformer", "EfficientFormerConfig"), + ("efficientnet", "EfficientNetConfig"), ("electra", "ElectraConfig"), ("encoder-decoder", "EncoderDecoderConfig"), ("ernie", "ErnieConfig"), @@ -248,6 +249,7 @@ ("dpr", "DPR_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("dpt", "DPT_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("efficientformer", "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("efficientnet", "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("electra", "ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("ernie", "ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("ernie_m", "ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP"), @@ -417,6 +419,7 @@ ("dpr", "DPR"), ("dpt", "DPT"), ("efficientformer", "EfficientFormer"), + ("efficientnet", "EfficientNet"), ("electra", "ELECTRA"), ("encoder-decoder", "Encoder decoder"), ("ernie", "ERNIE"), diff --git a/src/transformers/models/auto/image_processing_auto.py b/src/transformers/models/auto/image_processing_auto.py index d12aa1604162..fd8d58518c45 100644 --- a/src/transformers/models/auto/image_processing_auto.py +++ b/src/transformers/models/auto/image_processing_auto.py @@ -57,6 +57,7 @@ ("donut-swin", "DonutImageProcessor"), ("dpt", "DPTImageProcessor"), ("efficientformer", "EfficientFormerImageProcessor"), + ("efficientnet", "EfficientNetImageProcessor"), ("flava", "FlavaImageProcessor"), ("git", "CLIPImageProcessor"), ("glpn", "GLPNImageProcessor"), diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index b13f79eaf68b..04a7359bf0db 100755 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -73,6 +73,7 @@ ("dpr", "DPRQuestionEncoder"), ("dpt", "DPTModel"), ("efficientformer", "EfficientFormerModel"), + ("efficientnet", "EfficientNetModel"), ("electra", "ElectraModel"), ("ernie", "ErnieModel"), ("ernie_m", "ErnieMModel"), @@ -419,6 +420,7 @@ "EfficientFormerForImageClassificationWithTeacher", ), ), + ("efficientnet", "EfficientNetForImageClassification"), ("imagegpt", "ImageGPTForImageClassification"), ("levit", ("LevitForImageClassification", "LevitForImageClassificationWithTeacher")), ("mobilenet_v1", "MobileNetV1ForImageClassification"), diff --git a/src/transformers/models/efficientnet/__init__.py b/src/transformers/models/efficientnet/__init__.py new file mode 100644 index 000000000000..6df523721aef --- /dev/null +++ b/src/transformers/models/efficientnet/__init__.py @@ -0,0 +1,84 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +# rely on isort to merge the imports +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available + + +_import_structure = { + "configuration_efficientnet": [ + "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP", + "EfficientNetConfig", + "EfficientNetOnnxConfig", + ] +} + +try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"] + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_efficientnet"] = [ + "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST", + "EfficientNetForImageClassification", + "EfficientNetModel", + "EfficientNetPreTrainedModel", + ] + +if TYPE_CHECKING: + from .configuration_efficientnet import ( + EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP, + EfficientNetConfig, + EfficientNetOnnxConfig, + ) + + try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .image_processing_efficientnet import EfficientNetImageProcessor + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_efficientnet import ( + EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST, + EfficientNetForImageClassification, + EfficientNetModel, + EfficientNetPreTrainedModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) diff --git a/src/transformers/models/efficientnet/configuration_efficientnet.py b/src/transformers/models/efficientnet/configuration_efficientnet.py new file mode 100644 index 000000000000..a6e4d172f442 --- /dev/null +++ b/src/transformers/models/efficientnet/configuration_efficientnet.py @@ -0,0 +1,169 @@ +# coding=utf-8 +# Copyright 2023 Google Research, Inc. and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" EfficientNet model configuration""" + +from collections import OrderedDict +from typing import List, Mapping + +from packaging import version + +from ...configuration_utils import PretrainedConfig +from ...onnx import OnnxConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + +EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = { + "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json", +} + + +class EfficientNetConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of an [`EfficientNetModel`]. It is used to instantiate an + EfficientNet model according to the specified arguments, defining the model architecture.
Instantiating a + configuration with the defaults will yield a similar configuration to that of the EfficientNet + [google/efficientnet-b7](https://huggingface.co/google/efficientnet-b7) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + num_channels (`int`, *optional*, defaults to 3): + The number of input channels. + image_size (`int`, *optional*, defaults to 600): + The input image size. + width_coefficient (`float`, *optional*, defaults to 2.0): + Scaling coefficient for network width at each stage. + depth_coefficient (`float`, *optional*, defaults to 3.1): + Scaling coefficient for network depth at each stage. + depth_divisor (`int`, *optional*, defaults to 8): + A unit of network width. + kernel_sizes (`List[int]`, *optional*, defaults to `[3, 3, 5, 3, 5, 5, 3]`): + List of kernel sizes to be used in each block. + in_channels (`List[int]`, *optional*, defaults to `[32, 16, 24, 40, 80, 112, 192]`): + List of input channel sizes to be used in each block for convolutional layers. + out_channels (`List[int]`, *optional*, defaults to `[16, 24, 40, 80, 112, 192, 320]`): + List of output channel sizes to be used in each block for convolutional layers. + depthwise_padding (`List[int]`, *optional*, defaults to `[]`): + List of block indices with square padding. + strides (`List[int]`, *optional*, defaults to `[1, 2, 2, 2, 1, 2, 1]`): + List of stride sizes to be used in each block for convolutional layers. + num_block_repeats (`List[int]`, *optional*, defaults to `[1, 2, 2, 3, 3, 4, 1]`): + List of the number of times each block is to be repeated. + expand_ratios (`List[int]`, *optional*, defaults to `[1, 6, 6, 6, 6, 6, 6]`): + List of expansion ratios for each block. + squeeze_expansion_ratio (`float`, *optional*, defaults to 0.25): + Expansion ratio of the squeeze-and-excitation layers. + hidden_act (`str` or `function`, *optional*, defaults to `"swish"`): + The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`, + `"selu"`, `"gelu_new"`, `"silu"` and `"mish"` are supported. + hidden_dim (`int`, *optional*, defaults to 2560): + The hidden dimension of the layer before the classification head. + pooling_type (`str` or `function`, *optional*, defaults to `"mean"`): + Type of final pooling to be applied before the dense classification head. Available options are [`"mean"`, + `"max"`]. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + batch_norm_eps (`float`, *optional*, defaults to 1e-3): + The epsilon used by the batch normalization layers. + batch_norm_momentum (`float`, *optional*, defaults to 0.99): + The momentum used by the batch normalization layers. + dropout_rate (`float`, *optional*, defaults to 0.5): + The dropout rate to be applied before the final classifier layer. + drop_connect_rate (`float`, *optional*, defaults to 0.2): + The drop rate for skip connections.
+ + Example: ```python >>> from transformers import EfficientNetConfig, EfficientNetModel + + >>> # Initializing an EfficientNet efficientnet-b7 style configuration + >>> configuration = EfficientNetConfig() + + >>> # Initializing a model (with random weights) from the efficientnet-b7 style configuration + >>> model = EfficientNetModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + model_type = "efficientnet" + + def __init__( + self, + num_channels: int = 3, + image_size: int = 600, + width_coefficient: float = 2.0, + depth_coefficient: float = 3.1, + depth_divisor: int = 8, + kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3], + in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192], + out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320], + depthwise_padding: List[int] = [], + strides: List[int] = [1, 2, 2, 2, 1, 2, 1], + num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1], + expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], + squeeze_expansion_ratio: float = 0.25, + hidden_act: str = "swish", + hidden_dim: int = 2560, + pooling_type: str = "mean", + initializer_range: float = 0.02, + batch_norm_eps: float = 0.001, + batch_norm_momentum: float = 0.99, + dropout_rate: float = 0.5, + drop_connect_rate: float = 0.2, + **kwargs, + ): + super().__init__(**kwargs) + + self.num_channels = num_channels + self.image_size = image_size + self.width_coefficient = width_coefficient + self.depth_coefficient = depth_coefficient + self.depth_divisor = depth_divisor + self.kernel_sizes = kernel_sizes + self.in_channels = in_channels + self.out_channels = out_channels + self.depthwise_padding = depthwise_padding + self.strides = strides + self.num_block_repeats = num_block_repeats + self.expand_ratios = expand_ratios + self.squeeze_expansion_ratio = squeeze_expansion_ratio + self.hidden_act = hidden_act + self.hidden_dim = hidden_dim + self.pooling_type = pooling_type + self.initializer_range = initializer_range + self.batch_norm_eps = batch_norm_eps + self.batch_norm_momentum = batch_norm_momentum + self.dropout_rate = dropout_rate + self.drop_connect_rate = drop_connect_rate + self.num_hidden_layers = sum(num_block_repeats) * 4 + + +class EfficientNetOnnxConfig(OnnxConfig): + torch_onnx_minimum_version = version.parse("1.11") + + @property + def inputs(self) -> Mapping[str, Mapping[int, str]]: + return OrderedDict( + [ + ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), + ] + ) + + @property + def atol_for_validation(self) -> float: + return 1e-5 diff --git a/src/transformers/models/efficientnet/convert_efficientnet_to_pytorch.py b/src/transformers/models/efficientnet/convert_efficientnet_to_pytorch.py new file mode 100644 index 000000000000..dc7b592bfb5b --- /dev/null +++ b/src/transformers/models/efficientnet/convert_efficientnet_to_pytorch.py @@ -0,0 +1,339 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+"""Convert EfficientNet checkpoints from the original repository. + +URL: https://github.com/keras-team/keras/blob/v2.11.0/keras/applications/efficientnet.py""" + +import argparse +import json +import os + +import numpy as np +import PIL +import requests +import tensorflow.keras.applications.efficientnet as efficientnet +import torch +from huggingface_hub import hf_hub_download +from PIL import Image +from tensorflow.keras.preprocessing import image + +from transformers import ( + EfficientNetConfig, + EfficientNetForImageClassification, + EfficientNetImageProcessor, +) +from transformers.utils import logging + + +logging.set_verbosity_info() +logger = logging.get_logger(__name__) + +model_classes = { + "b0": efficientnet.EfficientNetB0, + "b1": efficientnet.EfficientNetB1, + "b2": efficientnet.EfficientNetB2, + "b3": efficientnet.EfficientNetB3, + "b4": efficientnet.EfficientNetB4, + "b5": efficientnet.EfficientNetB5, + "b6": efficientnet.EfficientNetB6, + "b7": efficientnet.EfficientNetB7, +} + +CONFIG_MAP = { + "b0": { + "hidden_dim": 1280, + "width_coef": 1.0, + "depth_coef": 1.0, + "image_size": 224, + "dropout_rate": 0.2, + "dw_padding": [], + }, + "b1": { + "hidden_dim": 1280, + "width_coef": 1.0, + "depth_coef": 1.1, + "image_size": 240, + "dropout_rate": 0.2, + "dw_padding": [16], + }, + "b2": { + "hidden_dim": 1408, + "width_coef": 1.1, + "depth_coef": 1.2, + "image_size": 260, + "dropout_rate": 0.3, + "dw_padding": [5, 8, 16], + }, + "b3": { + "hidden_dim": 1536, + "width_coef": 1.2, + "depth_coef": 1.4, + "image_size": 300, + "dropout_rate": 0.3, + "dw_padding": [5, 18], + }, + "b4": { + "hidden_dim": 1792, + "width_coef": 1.4, + "depth_coef": 1.8, + "image_size": 380, + "dropout_rate": 0.4, + "dw_padding": [6], + }, + "b5": { + "hidden_dim": 2048, + "width_coef": 1.6, + "depth_coef": 2.2, + "image_size": 456, + "dropout_rate": 0.4, + "dw_padding": [13, 27], + }, + "b6": { + "hidden_dim": 2304, + "width_coef": 1.8, + "depth_coef": 2.6, + "image_size": 528, + "dropout_rate": 0.5, + "dw_padding": [31], + }, + "b7": { + "hidden_dim": 2560, + "width_coef": 2.0, + "depth_coef": 3.1, + "image_size": 600, + "dropout_rate": 0.5, + "dw_padding": [18], + }, +} + + +def get_efficientnet_config(model_name): + config = EfficientNetConfig() + config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"] + config.width_coefficient = CONFIG_MAP[model_name]["width_coef"] + config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"] + config.image_size = CONFIG_MAP[model_name]["image_size"] + config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"] + config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"] + + repo_id = "huggingface/label-files" + filename = "imagenet-1k-id2label.json" + config.num_labels = 1000 + id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) + id2label = {int(k): v for k, v in id2label.items()} + + config.id2label = id2label + config.label2id = {v: k for k, v in id2label.items()} + return config + + +# We will verify our results on an image of cute cats +def prepare_img(): + url = "http://images.cocodataset.org/val2017/000000039769.jpg" + im = Image.open(requests.get(url, stream=True).raw) + return im + + +def convert_image_processor(model_name): + size = CONFIG_MAP[model_name]["image_size"] + preprocessor = EfficientNetImageProcessor( + size={"height": size, "width": size}, + image_mean=[0.485, 0.456, 0.406], + image_std=[0.47853944, 0.4732864, 0.47434163], + do_center_crop=False, + ) + return preprocessor + + +# here we 
list all keys to be renamed (original name on the left, our name on the right) +def rename_keys(original_param_names): + block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")] + block_names = sorted(list(set(block_names))) + num_blocks = len(block_names) + block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))} + + rename_keys = [] + rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight")) + rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight")) + rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias")) + rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean")) + rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var")) + + for b in block_names: + hf_b = block_name_mapping[b] + rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight")) + rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight")) + rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias")) + rename_keys.append( + (f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean") + ) + rename_keys.append( + (f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var") + ) + rename_keys.append( + (f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight") + ) + rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight")) + rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias")) + rename_keys.append( + (f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean") + ) + rename_keys.append( + (f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var") + ) + + rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight")) + rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias")) + rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight")) + rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias")) + rename_keys.append( + (f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight") + ) + rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight")) + rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias")) + rename_keys.append( + (f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean") + ) + rename_keys.append( + (f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var") + ) + + rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight")) + rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight")) + rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias")) + rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean")) + rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var")) + + key_mapping = {} + for item in rename_keys: + if item[0] in original_param_names: + key_mapping[item[0]] = 
"efficientnet." + item[1] + + key_mapping["predictions/kernel:0"] = "classifier.weight" + key_mapping["predictions/bias:0"] = "classifier.bias" + return key_mapping + + +def replace_params(hf_params, tf_params, key_mapping): + for key, value in tf_params.items(): + if "normalization" in key: + continue + + hf_key = key_mapping[key] + if "_conv" in key and "kernel" in key: + new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1) + elif "depthwise_kernel" in key: + new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1) + elif "kernel" in key: + new_hf_value = torch.from_numpy(np.transpose(value)) + else: + new_hf_value = torch.from_numpy(value) + + # Replace HF parameters with original TF model parameters + assert hf_params[hf_key].shape == new_hf_value.shape + hf_params[hf_key].copy_(new_hf_value) + + +@torch.no_grad() +def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub): + """ + Copy/paste/tweak model's weights to our EfficientNet structure. + """ + # Load original model + original_model = model_classes[model_name]( + include_top=True, + weights="imagenet", + input_tensor=None, + input_shape=None, + pooling=None, + classes=1000, + classifier_activation="softmax", + ) + + tf_params = original_model.trainable_variables + tf_non_train_params = original_model.non_trainable_variables + tf_params = {param.name: param.numpy() for param in tf_params} + for param in tf_non_train_params: + tf_params[param.name] = param.numpy() + tf_param_names = [k for k in tf_params.keys()] + + # Load HuggingFace model + config = get_efficientnet_config(model_name) + hf_model = EfficientNetForImageClassification(config).eval() + hf_params = hf_model.state_dict() + + # Create src-to-dst parameter name mapping dictionary + print("Converting parameters...") + key_mapping = rename_keys(tf_param_names) + replace_params(hf_params, tf_params, key_mapping) + + # Initialize preprocessor and preprocess input image + preprocessor = convert_image_processor(model_name) + inputs = preprocessor(images=prepare_img(), return_tensors="pt") + + # HF model inference + hf_model.eval() + with torch.no_grad(): + outputs = hf_model(**inputs) + hf_logits = outputs.logits.detach().numpy() + + # Original model inference + original_model.trainable = False + image_size = CONFIG_MAP[model_name]["image_size"] + img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST) + x = image.img_to_array(img) + x = np.expand_dims(x, axis=0) + original_logits = original_model.predict(x) + + # Check whether original and HF model outputs match -> np.allclose + assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same." 
+ print("Model outputs match!") + + if save_model: + # Create folder to save model + if not os.path.isdir(pytorch_dump_folder_path): + os.mkdir(pytorch_dump_folder_path) + # Save converted model and feature extractor + hf_model.save_pretrained(pytorch_dump_folder_path) + preprocessor.save_pretrained(pytorch_dump_folder_path) + + if push_to_hub: + # Push model and feature extractor to hub + print(f"Pushing converted {model_name} to the hub...") + model_name = f"efficientnet-{model_name}" + preprocessor.push_to_hub(model_name) + hf_model.push_to_hub(model_name) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--model_name", + default="b0", + type=str, + help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].", + ) + parser.add_argument( + "--pytorch_dump_folder_path", + default="hf_model", + type=str, + help="Path to the output PyTorch model directory.", + ) + parser.add_argument("--save_model", action="store_true", help="Save model to local") + parser.add_argument("--push_to_hub", action="store_true", help="Push model and feature extractor to the hub") + + args = parser.parse_args() + convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub) diff --git a/src/transformers/models/efficientnet/image_processing_efficientnet.py b/src/transformers/models/efficientnet/image_processing_efficientnet.py new file mode 100644 index 000000000000..da547f9ccb09 --- /dev/null +++ b/src/transformers/models/efficientnet/image_processing_efficientnet.py @@ -0,0 +1,347 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Image processor class for EfficientNet.""" + +from typing import Dict, List, Optional, Union + +import numpy as np + +from transformers.utils import is_vision_available +from transformers.utils.generic import TensorType + +from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict +from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format +from ...image_utils import ( + IMAGENET_STANDARD_MEAN, + IMAGENET_STANDARD_STD, + ChannelDimension, + ImageInput, + PILImageResampling, + make_list_of_images, + to_numpy_array, + valid_images, +) +from ...utils import logging + + +if is_vision_available(): + import PIL + + +logger = logging.get_logger(__name__) + + +class EfficientNetImageProcessor(BaseImageProcessor): + r""" + Constructs a EfficientNet image processor. + + Args: + do_resize (`bool`, *optional*, defaults to `True`): + Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by + `do_resize` in `preprocess`. + size (`Dict[str, int]` *optional*, defaults to `{"height": 346, "width": 346}`): + Size of the image after `resize`. Can be overridden by `size` in `preprocess`. 
+        resample (`PILImageResampling` filter, *optional*, defaults to `PILImageResampling.NEAREST`):
+            Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
+        do_center_crop (`bool`, *optional*, defaults to `False`):
+            Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image
+            is padded with 0's and then center cropped. Can be overridden by `do_center_crop` in `preprocess`.
+        crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 289, "width": 289}`):
+            Desired output size when applying center-cropping. Can be overridden by `crop_size` in `preprocess`.
+        do_rescale (`bool`, *optional*, defaults to `True`):
+            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
+            parameter in the `preprocess` method.
+        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+            Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
+            `preprocess` method.
+        rescale_offset (`bool`, *optional*, defaults to `False`):
+            Whether to rescale the image between [-scale_range, scale_range] instead of [0, scale_range]. Can be
+            overridden by the `rescale_offset` parameter in the `preprocess` method.
+        do_normalize (`bool`, *optional*, defaults to `True`):
+            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
+            method.
+        image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
+            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
+            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+        image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
+            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
+            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
+        include_top (`bool`, *optional*, defaults to `True`):
+            Whether to normalize the image again with the standard deviation only, as done for image classification in
+            the original implementation. Should be set to `True` if the inputs are used for image classification.
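+
+    Example (a minimal sketch; the checkpoint name assumes the converted `google/efficientnet-b7` weights are
+    available on the hub, as produced by the conversion script):
+
+    ```python
+    >>> from PIL import Image
+    >>> import requests
+    >>> from transformers import EfficientNetImageProcessor
+
+    >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+    >>> image = Image.open(requests.get(url, stream=True).raw)
+
+    >>> processor = EfficientNetImageProcessor.from_pretrained("google/efficientnet-b7")
+    >>> inputs = processor(images=image, return_tensors="pt")
+    >>> list(inputs.pixel_values.shape)  # resized to the 600 x 600 input resolution of the b7 variant
+    [1, 3, 600, 600]
+    ```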
+ """ + + model_input_names = ["pixel_values"] + + def __init__( + self, + do_resize: bool = True, + size: Dict[str, int] = None, + resample: PILImageResampling = PIL.Image.NEAREST, + do_center_crop: bool = False, + crop_size: Dict[str, int] = None, + rescale_factor: Union[int, float] = 1 / 255, + rescale_offset: bool = False, + do_rescale: bool = True, + do_normalize: bool = True, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + include_top: bool = True, + **kwargs, + ) -> None: + super().__init__(**kwargs) + size = size if size is not None else {"height": 346, "width": 346} + size = get_size_dict(size) + crop_size = crop_size if crop_size is not None else {"height": 289, "width": 289} + crop_size = get_size_dict(crop_size, param_name="crop_size") + + self.do_resize = do_resize + self.size = size + self.resample = resample + self.do_center_crop = do_center_crop + self.crop_size = crop_size + self.do_rescale = do_rescale + self.rescale_factor = rescale_factor + self.rescale_offset = rescale_offset + self.do_normalize = do_normalize + self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN + self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD + self.include_top = include_top + + def resize( + self, + image: np.ndarray, + size: Dict[str, int], + resample: PILImageResampling = PIL.Image.NEAREST, + data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> np.ndarray: + """ + Resize an image to `(size["height"], size["width"])` using the specified resampling filter. + + Args: + image (`np.ndarray`): + Image to resize. + size (`Dict[str, int]`): + Size of the output image. + resample (`PILImageResampling` filter, *optional*, defaults to `PILImageResampling.NEAREST`): + Resampling filter to use when resizing the image. + data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format of the image. If not provided, it will be the same as the input image. + """ + size = get_size_dict(size) + if "height" not in size or "width" not in size: + raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}") + return resize( + image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs + ) + + def center_crop( + self, + image: np.ndarray, + size: Dict[str, int], + data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> np.ndarray: + """ + Center crop an image to `(crop_size["height"], crop_size["width"])`. If the input size is smaller than + `crop_size` along any edge, the image is padded with 0's and then center cropped. + + Args: + image (`np.ndarray`): + Image to center crop. + size (`Dict[str, int]`): + Size of the output image. + data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format of the image. If not provided, it will be the same as the input image. + """ + size = get_size_dict(size) + if "height" not in size or "width" not in size: + raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}") + return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs) + + def rescale( + self, + image: np.ndarray, + scale: Union[int, float], + offset: bool = True, + data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ): + """ + Rescale an image by a scale factor. image = image * scale. 
+
+        Args:
+            image (`np.ndarray`):
+                Image to rescale.
+            scale (`int` or `float`):
+                Scale to apply to the image.
+            offset (`bool`, *optional*):
+                Whether to scale the image in both negative and positive directions.
+            data_format (`str` or `ChannelDimension`, *optional*):
+                The channel dimension format of the image. If not provided, it will be the same as the input image.
+        """
+        if offset:
+            rescaled_image = (image - 127.5) * scale
+            if data_format is not None:
+                rescaled_image = to_channel_dimension_format(rescaled_image, data_format)
+            rescaled_image = rescaled_image.astype(np.float32)
+        else:
+            rescaled_image = rescale(image, scale=scale, data_format=data_format, **kwargs)
+        return rescaled_image
+
+    def normalize(
+        self,
+        image: np.ndarray,
+        mean: Union[float, List[float]],
+        std: Union[float, List[float]],
+        data_format: Optional[Union[str, ChannelDimension]] = None,
+        **kwargs,
+    ) -> np.ndarray:
+        """
+        Normalize an image. image = (image - image_mean) / image_std.
+
+        Args:
+            image (`np.ndarray`):
+                Image to normalize.
+            mean (`float` or `List[float]`):
+                Image mean.
+            std (`float` or `List[float]`):
+                Image standard deviation.
+            data_format (`str` or `ChannelDimension`, *optional*):
+                The channel dimension format of the image. If not provided, it will be the same as the input image.
+        """
+        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
+
+    def preprocess(
+        self,
+        images: ImageInput,
+        do_resize: bool = None,
+        size: Dict[str, int] = None,
+        resample=None,
+        do_center_crop: bool = None,
+        crop_size: Dict[str, int] = None,
+        do_rescale: bool = None,
+        rescale_factor: float = None,
+        rescale_offset: bool = None,
+        do_normalize: bool = None,
+        image_mean: Optional[Union[float, List[float]]] = None,
+        image_std: Optional[Union[float, List[float]]] = None,
+        include_top: bool = None,
+        return_tensors: Optional[Union[str, TensorType]] = None,
+        data_format: ChannelDimension = ChannelDimension.FIRST,
+        **kwargs,
+    ) -> BatchFeature:
+        """
+        Preprocess an image or batch of images.
+
+        Args:
+            images (`ImageInput`):
+                Image to preprocess.
+            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+                Whether to resize the image.
+            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+                Size of the image after `resize`.
+            resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
+                PILImageResampling filter to use if resizing the image. Only has an effect if `do_resize` is set to
+                `True`.
+            do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
+                Whether to center crop the image.
+            crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
+                Size of the image after center crop. If one edge of the image is smaller than `crop_size`, it will be
+                padded with zeros and then cropped.
+            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+                Whether to rescale the image values to [0, 1].
+            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+            rescale_offset (`bool`, *optional*, defaults to `self.rescale_offset`):
+                Whether to rescale the image between [-scale_range, scale_range] instead of [0, scale_range].
+            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+                Whether to normalize the image.
+            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+                Image mean.
+            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+                Image standard deviation.
+            include_top (`bool`, *optional*, defaults to `self.include_top`):
+                Rescales the image again for image classification if set to True.
+            return_tensors (`str` or `TensorType`, *optional*):
+                The type of tensors to return. Can be one of:
+                - `None`: Return a list of `np.ndarray`.
+                - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+                - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+                The channel dimension format for the output image. Can be one of:
+                - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+        """
+        do_resize = do_resize if do_resize is not None else self.do_resize
+        resample = resample if resample is not None else self.resample
+        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
+        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+        rescale_offset = rescale_offset if rescale_offset is not None else self.rescale_offset
+        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+        image_mean = image_mean if image_mean is not None else self.image_mean
+        image_std = image_std if image_std is not None else self.image_std
+        include_top = include_top if include_top is not None else self.include_top
+
+        size = size if size is not None else self.size
+        size = get_size_dict(size)
+        crop_size = crop_size if crop_size is not None else self.crop_size
+        crop_size = get_size_dict(crop_size, param_name="crop_size")
+
+        images = make_list_of_images(images)
+
+        if not valid_images(images):
+            raise ValueError(
+                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+                "torch.Tensor, tf.Tensor or jax.ndarray."
+            )
+
+        if do_resize and (size is None or resample is None):
+            raise ValueError("Size and resample must be specified if do_resize is True.")
+
+        if do_center_crop and crop_size is None:
+            raise ValueError("Crop size must be specified if do_center_crop is True.")
+
+        if do_rescale and rescale_factor is None:
+            raise ValueError("Rescale factor must be specified if do_rescale is True.")
+
+        if do_normalize and (image_mean is None or image_std is None):
+            raise ValueError("Image mean and std must be specified if do_normalize is True.")
+
+        # All transformations expect numpy arrays.
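+        # The transformations below run in a fixed order: resize -> center crop -> rescale -> normalize. When
+        # `include_top` is set, a second, std-only normalization is applied afterwards to mirror the preprocessing
+        # of the original Keras implementation before its classification head.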
+ images = [to_numpy_array(image) for image in images] + + if do_resize: + images = [self.resize(image=image, size=size, resample=resample) for image in images] + + if do_center_crop: + images = [self.center_crop(image=image, size=crop_size) for image in images] + + if do_rescale: + images = [self.rescale(image=image, scale=rescale_factor, offset=rescale_offset) for image in images] + + if do_normalize: + images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images] + + if include_top: + images = [self.normalize(image=image, mean=[0, 0, 0], std=image_std) for image in images] + + images = [to_channel_dimension_format(image, data_format) for image in images] + + data = {"pixel_values": images} + return BatchFeature(data=data, tensor_type=return_tensors) diff --git a/src/transformers/models/efficientnet/modeling_efficientnet.py b/src/transformers/models/efficientnet/modeling_efficientnet.py new file mode 100644 index 000000000000..4741e66fd585 --- /dev/null +++ b/src/transformers/models/efficientnet/modeling_efficientnet.py @@ -0,0 +1,662 @@ +# coding=utf-8 +# Copyright 2023 Google Research, Inc. and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch EfficientNet model.""" + + +import math +from typing import Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from ...activations import ACT2FN +from ...modeling_outputs import ( + BaseModelOutputWithNoAttention, + BaseModelOutputWithPoolingAndNoAttention, + ImageClassifierOutputWithNoAttention, +) +from ...modeling_utils import PreTrainedModel +from ...utils import ( + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, +) +from .configuration_efficientnet import EfficientNetConfig + + +logger = logging.get_logger(__name__) + +# General docstring +_CONFIG_FOR_DOC = "EfficientNetConfig" + +# Base docstring +_CHECKPOINT_FOR_DOC = "google/efficientnet-b7" +_EXPECTED_OUTPUT_SHAPE = [1, 768, 7, 7] + +# Image classification docstring +_IMAGE_CLASS_CHECKPOINT = "google/efficientnet-b7" +_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" + +EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "google/efficientnet-b7", + # See all EfficientNet models at https://huggingface.co/models?filter=efficientnet +] + + +EFFICIENTNET_START_DOCSTRING = r""" + This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it + as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and + behavior. + + Parameters: + config ([`EfficientNetConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. 
+""" + +EFFICIENTNET_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See + [`AutoImageProcessor.__call__`] for details. + + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +def round_filters(config: EfficientNetConfig, num_channels: int): + r""" + Round number of filters based on depth multiplier. + """ + divisor = config.depth_divisor + num_channels *= config.width_coefficient + new_dim = max(divisor, int(num_channels + divisor / 2) // divisor * divisor) + + # Make sure that round down does not go down by more than 10%. + if new_dim < 0.9 * num_channels: + new_dim += divisor + + return int(new_dim) + + +def correct_pad(kernel_size: Union[int, Tuple], adjust: bool = True): + r""" + Utility function to get the tuple padding value for the depthwise convolution. + + Args: + kernel_size (`int` or `tuple`): + Kernel size of the convolution layers. + adjust (`bool`, *optional*, defaults to `True`): + Adjusts padding value to apply to right and bottom sides of the input. + """ + if isinstance(kernel_size, int): + kernel_size = (kernel_size, kernel_size) + + correct = (kernel_size[0] // 2, kernel_size[1] // 2) + if adjust: + return (correct[1] - 1, correct[1], correct[0] - 1, correct[0]) + else: + return (correct[1], correct[1], correct[0], correct[0]) + + +class EfficientNetEmbeddings(nn.Module): + r""" + A module that corresponds to the stem module of the original work. + """ + + def __init__(self, config: EfficientNetConfig): + super().__init__() + + out_channels = round_filters(config, 32) + self.padding = nn.ZeroPad2d(padding=(0, 1, 0, 1)) + self.convolution = nn.Conv2d( + config.num_channels, out_channels, kernel_size=3, stride=2, padding="valid", bias=False + ) + self.batchnorm = nn.BatchNorm2d(out_channels, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum) + self.activation = ACT2FN[config.hidden_act] + + def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: + features = self.padding(pixel_values) + features = self.convolution(features) + features = self.batchnorm(features) + features = self.activation(features) + + return features + + +class EfficientNetDepthwiseConv2d(nn.Conv2d): + def __init__( + self, + in_channels, + depth_multiplier=1, + kernel_size=3, + stride=1, + padding=0, + dilation=1, + bias=True, + padding_mode="zeros", + ): + out_channels = in_channels * depth_multiplier + super().__init__( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=in_channels, + bias=bias, + padding_mode=padding_mode, + ) + + +class EfficientNetExpansionLayer(nn.Module): + r""" + This corresponds to the expansion phase of each block in the original implementation. 
+ """ + + def __init__( + self, + config: EfficientNetConfig, + in_dim: int, + out_dim: int, + stride: int, + ): + super().__init__() + self.expand_conv = nn.Conv2d( + in_channels=in_dim, + out_channels=out_dim, + kernel_size=1, + padding="same", + bias=False, + ) + self.expand_bn = nn.BatchNorm2d(num_features=out_dim) + self.expand_act = ACT2FN[config.hidden_act] + + def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor: + # Expand phase + hidden_states = self.expand_conv(hidden_states) + hidden_states = self.expand_bn(hidden_states) + hidden_states = self.expand_act(hidden_states) + + return hidden_states + + +class EfficientNetDepthwiseLayer(nn.Module): + r""" + This corresponds to the depthwise convolution phase of each block in the original implementation. + """ + + def __init__( + self, + config: EfficientNetConfig, + in_dim: int, + stride: int, + kernel_size: int, + adjust_padding: bool, + ): + super().__init__() + self.stride = stride + conv_pad = "valid" if self.stride == 2 else "same" + padding = correct_pad(kernel_size, adjust=adjust_padding) + + self.depthwise_conv_pad = nn.ZeroPad2d(padding=padding) + self.depthwise_conv = EfficientNetDepthwiseConv2d( + in_dim, kernel_size=kernel_size, stride=stride, padding=conv_pad, bias=False + ) + self.depthwise_norm = nn.BatchNorm2d( + num_features=in_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum + ) + self.depthwise_act = ACT2FN[config.hidden_act] + + def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor: + # Depthwise convolution + if self.stride == 2: + hidden_states = self.depthwise_conv_pad(hidden_states) + + hidden_states = self.depthwise_conv(hidden_states) + hidden_states = self.depthwise_norm(hidden_states) + hidden_states = self.depthwise_act(hidden_states) + + return hidden_states + + +class EfficientNetSqueezeExciteLayer(nn.Module): + r""" + This corresponds to the Squeeze and Excitement phase of each block in the original implementation. + """ + + def __init__(self, config: EfficientNetConfig, in_dim: int, expand_dim: int, expand: bool = False): + super().__init__() + self.dim = expand_dim if expand else in_dim + self.dim_se = max(1, int(in_dim * config.squeeze_expansion_ratio)) + + self.squeeze = nn.AdaptiveAvgPool2d(output_size=1) + self.reduce = nn.Conv2d( + in_channels=self.dim, + out_channels=self.dim_se, + kernel_size=1, + padding="same", + ) + self.expand = nn.Conv2d( + in_channels=self.dim_se, + out_channels=self.dim, + kernel_size=1, + padding="same", + ) + self.act_reduce = ACT2FN[config.hidden_act] + self.act_expand = nn.Sigmoid() + + def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor: + inputs = hidden_states + hidden_states = self.squeeze(hidden_states) + hidden_states = self.reduce(hidden_states) + hidden_states = self.act_reduce(hidden_states) + + hidden_states = self.expand(hidden_states) + hidden_states = self.act_expand(hidden_states) + hidden_states = torch.mul(inputs, hidden_states) + + return hidden_states + + +class EfficientNetFinalBlockLayer(nn.Module): + r""" + This corresponds to the final phase of each block in the original implementation. 
+ """ + + def __init__( + self, config: EfficientNetConfig, in_dim: int, out_dim: int, stride: int, drop_rate: float, id_skip: bool + ): + super().__init__() + self.apply_dropout = stride == 1 and not id_skip + self.project_conv = nn.Conv2d( + in_channels=in_dim, + out_channels=out_dim, + kernel_size=1, + padding="same", + bias=False, + ) + self.project_bn = nn.BatchNorm2d( + num_features=out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum + ) + self.dropout = nn.Dropout(p=drop_rate) + + def forward(self, embeddings: torch.FloatTensor, hidden_states: torch.FloatTensor) -> torch.Tensor: + hidden_states = self.project_conv(hidden_states) + hidden_states = self.project_bn(hidden_states) + + if self.apply_dropout: + hidden_states = self.dropout(hidden_states) + hidden_states = hidden_states + embeddings + + return hidden_states + + +class EfficientNetBlock(nn.Module): + r""" + This corresponds to the expansion and depthwise convolution phase of each block in the original implementation. + + Args: + config ([`EfficientNetConfig`]): + Model configuration class. + in_dim (`int`): + Number of input channels. + out_dim (`int`): + Number of output channels. + stride (`int`): + Stride size to be used in convolution layers. + expand_ratio (`int`): + Expand ratio to set the output dimensions for the expansion and squeeze-excite layers. + kernel_size (`int`): + Kernel size for the depthwise convolution layer. + drop_rate (`float`): + Dropout rate to be used in the final phase of each block. + id_skip (`bool`): + Whether to apply dropout and sum the final hidden states with the input embeddings during the final phase + of each block. Set to `True` for the first block of each stage. + adjust_padding (`bool`): + Whether to apply padding to only right and bottom side of the input kernel before the depthwise convolution + operation, set to `True` for inputs with odd input sizes. + """ + + def __init__( + self, + config: EfficientNetConfig, + in_dim: int, + out_dim: int, + stride: int, + expand_ratio: int, + kernel_size: int, + drop_rate: float, + id_skip: bool, + adjust_padding: bool, + ): + super().__init__() + self.expand_ratio = expand_ratio + self.expand = True if self.expand_ratio != 1 else False + expand_in_dim = in_dim * expand_ratio + + if self.expand: + self.expansion = EfficientNetExpansionLayer( + config=config, in_dim=in_dim, out_dim=expand_in_dim, stride=stride + ) + + self.depthwise_conv = EfficientNetDepthwiseLayer( + config=config, + in_dim=expand_in_dim if self.expand else in_dim, + stride=stride, + kernel_size=kernel_size, + adjust_padding=adjust_padding, + ) + self.squeeze_excite = EfficientNetSqueezeExciteLayer( + config=config, in_dim=in_dim, expand_dim=expand_in_dim, expand=self.expand + ) + self.projection = EfficientNetFinalBlockLayer( + config=config, + in_dim=expand_in_dim if self.expand else in_dim, + out_dim=out_dim, + stride=stride, + drop_rate=drop_rate, + id_skip=id_skip, + ) + + def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor: + embeddings = hidden_states + # Expansion and depthwise convolution phase + if self.expand_ratio != 1: + hidden_states = self.expansion(hidden_states) + hidden_states = self.depthwise_conv(hidden_states) + + # Squeeze and excite phase + hidden_states = self.squeeze_excite(hidden_states) + hidden_states = self.projection(embeddings, hidden_states) + return hidden_states + + +class EfficientNetEncoder(nn.Module): + r""" + Forward propogates the embeddings through each EfficientNet block. 
+ + Args: + config ([`EfficientNetConfig`]): + Model configuration class. + """ + + def __init__(self, config: EfficientNetConfig): + super().__init__() + self.config = config + self.depth_coefficient = config.depth_coefficient + + def round_repeats(repeats): + # Round number of block repeats based on depth multiplier. + return int(math.ceil(self.depth_coefficient * repeats)) + + num_base_blocks = len(config.in_channels) + num_blocks = sum(round_repeats(n) for n in config.num_block_repeats) + + curr_block_num = 0 + blocks = [] + for i in range(num_base_blocks): + in_dim = round_filters(config, config.in_channels[i]) + out_dim = round_filters(config, config.out_channels[i]) + stride = config.strides[i] + kernel_size = config.kernel_sizes[i] + expand_ratio = config.expand_ratios[i] + + for j in range(round_repeats(config.num_block_repeats[i])): + id_skip = True if j == 0 else False + stride = 1 if j > 0 else stride + in_dim = out_dim if j > 0 else in_dim + adjust_padding = False if curr_block_num in config.depthwise_padding else True + drop_rate = config.drop_connect_rate * curr_block_num / num_blocks + + block = EfficientNetBlock( + config=config, + in_dim=in_dim, + out_dim=out_dim, + stride=stride, + kernel_size=kernel_size, + expand_ratio=expand_ratio, + drop_rate=drop_rate, + id_skip=id_skip, + adjust_padding=adjust_padding, + ) + blocks.append(block) + curr_block_num += 1 + + self.blocks = nn.ModuleList(blocks) + self.top_conv = nn.Conv2d( + in_channels=out_dim, + out_channels=round_filters(config, 1280), + kernel_size=1, + padding="same", + bias=False, + ) + self.top_bn = nn.BatchNorm2d( + num_features=config.hidden_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum + ) + self.top_activation = ACT2FN[config.hidden_act] + + def forward( + self, + hidden_states: torch.FloatTensor, + output_hidden_states: Optional[bool] = False, + return_dict: Optional[bool] = True, + ) -> BaseModelOutputWithNoAttention: + all_hidden_states = (hidden_states,) if output_hidden_states else None + + for block in self.blocks: + hidden_states = block(hidden_states) + if output_hidden_states: + all_hidden_states += (hidden_states,) + + hidden_states = self.top_conv(hidden_states) + hidden_states = self.top_bn(hidden_states) + hidden_states = self.top_activation(hidden_states) + + if not return_dict: + return tuple(v for v in [hidden_states, all_hidden_states] if v is not None) + + return BaseModelOutputWithNoAttention( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states, + ) + + +class EfficientNetPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. 
+ """ + + config_class = EfficientNetConfig + base_model_prefix = "efficientnet" + main_input_name = "pixel_values" + supports_gradient_checkpointing = True + + def _init_weights(self, module): + """Initialize the weights""" + if isinstance(module, (nn.Linear, nn.Conv2d)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, EfficientNetBlock): + module.gradient_checkpointing = value + + +@add_start_docstrings( + "The bare EfficientNet model outputting raw features without any specific head on top.", + EFFICIENTNET_START_DOCSTRING, +) +class EfficientNetModel(EfficientNetPreTrainedModel): + def __init__(self, config: EfficientNetConfig): + super().__init__(config) + self.config = config + self.embeddings = EfficientNetEmbeddings(config) + self.encoder = EfficientNetEncoder(config) + + # Final pooling layer + if config.pooling_type == "mean": + self.pooler = nn.AvgPool2d(config.hidden_dim, ceil_mode=True) + elif config.pooling_type == "max": + self.pooler = nn.MaxPool2d(config.hidden_dim, ceil_mode=True) + else: + raise ValueError(f"config.pooling must be one of ['mean', 'max'] got {config.pooling}") + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(EFFICIENTNET_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=BaseModelOutputWithPoolingAndNoAttention, + config_class=_CONFIG_FOR_DOC, + modality="vision", + expected_output=_EXPECTED_OUTPUT_SHAPE, + ) + def forward( + self, + pixel_values: torch.FloatTensor = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]: + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + embedding_output = self.embeddings(pixel_values) + + encoder_outputs = self.encoder( + embedding_output, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + # Apply pooling + last_hidden_state = encoder_outputs[0] + pooled_output = self.pooler(last_hidden_state) + # Reshape (batch_size, 1280, 1 , 1) -> (batch_size, 1280) + pooled_output = pooled_output.reshape(pooled_output.shape[:2]) + + if not return_dict: + return (last_hidden_state, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndNoAttention( + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + ) + + +@add_start_docstrings( + """ + EfficientNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. + for ImageNet. 
+ """, + EFFICIENTNET_START_DOCSTRING, +) +class EfficientNetForImageClassification(EfficientNetPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.config = config + self.efficientnet = EfficientNetModel(config) + # Classifier head + self.dropout = nn.Dropout(p=config.dropout_rate) + self.classifier = nn.Linear(config.hidden_dim, self.num_labels) if self.num_labels > 0 else nn.Identity() + self.classifier_act = nn.Softmax(dim=1) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(EFFICIENTNET_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_IMAGE_CLASS_CHECKPOINT, + output_type=ImageClassifierOutputWithNoAttention, + config_class=_CONFIG_FOR_DOC, + expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, + ) + def forward( + self, + pixel_values: torch.FloatTensor = None, + labels: Optional[torch.LongTensor] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the image classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.efficientnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict) + + pooled_output = outputs.pooler_output if return_dict else outputs[1] + pooled_output = self.dropout(pooled_output) + logits = self.classifier(pooled_output) + logits = self.classifier_act(logits) + + loss = None + if labels is not None: + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return ImageClassifierOutputWithNoAttention( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + ) diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index b62d10ebdba6..a72b7678dc7c 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -2317,6 +2317,30 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class EfficientNetForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class 
EfficientNetModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class EfficientNetPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST = None diff --git a/src/transformers/utils/dummy_vision_objects.py b/src/transformers/utils/dummy_vision_objects.py index 30831ad9d892..5960f1e4b47e 100644 --- a/src/transformers/utils/dummy_vision_objects.py +++ b/src/transformers/utils/dummy_vision_objects.py @@ -191,6 +191,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) +class EfficientNetImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + class FlavaFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] diff --git a/tests/models/convnext/test_modeling_convnext.py b/tests/models/convnext/test_modeling_convnext.py index b6abc332c424..d47bcb80f482 100644 --- a/tests/models/convnext/test_modeling_convnext.py +++ b/tests/models/convnext/test_modeling_convnext.py @@ -82,7 +82,6 @@ def prepare_config_and_inputs(self): labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() - return config, pixel_values, labels def get_config(self): diff --git a/tests/models/efficientnet/__init__.py b/tests/models/efficientnet/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/models/efficientnet/test_image_processing_efficientnet.py b/tests/models/efficientnet/test_image_processing_efficientnet.py new file mode 100644 index 000000000000..8e4fad3b0840 --- /dev/null +++ b/tests/models/efficientnet/test_image_processing_efficientnet.py @@ -0,0 +1,195 @@ +# coding=utf-8 +# Copyright 2023 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import unittest + +import numpy as np + +from transformers.testing_utils import require_torch, require_vision +from transformers.utils import is_torch_available, is_vision_available + +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs + + +if is_torch_available(): + import torch + +if is_vision_available(): + from PIL import Image + + from transformers import EfficientNetImageProcessor + + +class EfficientNetImageProcessorTester(unittest.TestCase): + def __init__( + self, + parent, + batch_size=13, + num_channels=3, + image_size=18, + min_resolution=30, + max_resolution=400, + do_resize=True, + size=None, + do_normalize=True, + image_mean=[0.5, 0.5, 0.5], + image_std=[0.5, 0.5, 0.5], + ): + size = size if size is not None else {"height": 18, "width": 18} + self.parent = parent + self.batch_size = batch_size + self.num_channels = num_channels + self.image_size = image_size + self.min_resolution = min_resolution + self.max_resolution = max_resolution + self.do_resize = do_resize + self.size = size + self.do_normalize = do_normalize + self.image_mean = image_mean + self.image_std = image_std + + def prepare_image_processor_dict(self): + return { + "image_mean": self.image_mean, + "image_std": self.image_std, + "do_normalize": self.do_normalize, + "do_resize": self.do_resize, + "size": self.size, + } + + +@require_torch +@require_vision +class EfficientNetImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase): + image_processing_class = EfficientNetImageProcessor if is_vision_available() else None + + def setUp(self): + self.image_processor_tester = EfficientNetImageProcessorTester(self) + + @property + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() + + def test_image_processor_properties(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "image_mean")) + self.assertTrue(hasattr(image_processing, "image_std")) + self.assertTrue(hasattr(image_processing, "do_normalize")) + self.assertTrue(hasattr(image_processing, "do_resize")) + self.assertTrue(hasattr(image_processing, "size")) + + def test_image_processor_from_dict_with_kwargs(self): + image_processor = self.image_processing_class.from_dict(self.image_processor_dict) + self.assertEqual(image_processor.size, {"height": 18, "width": 18}) + + image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42) + self.assertEqual(image_processor.size, {"height": 42, "width": 42}) + + def test_call_pil(self): + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) + # create random PIL images + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False) + for image in image_inputs: + self.assertIsInstance(image, Image.Image) + + # Test not batched input + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values + self.assertEqual( + encoded_images.shape, + ( + 1, + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], + ), + ) + + # Test batched + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values + self.assertEqual( + encoded_images.shape, + ( + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + 
self.image_processor_tester.size["width"], + ), + ) + + def test_call_numpy(self): + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) + # create random numpy tensors + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) + for image in image_inputs: + self.assertIsInstance(image, np.ndarray) + + # Test not batched input + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values + self.assertEqual( + encoded_images.shape, + ( + 1, + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], + ), + ) + + # Test batched + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values + self.assertEqual( + encoded_images.shape, + ( + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], + ), + ) + + def test_call_pytorch(self): + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) + # create random PyTorch tensors + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) + for image in image_inputs: + self.assertIsInstance(image, torch.Tensor) + + # Test not batched input + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values + self.assertEqual( + encoded_images.shape, + ( + 1, + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], + ), + ) + + # Test batched + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values + self.assertEqual( + encoded_images.shape, + ( + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], + ), + ) diff --git a/tests/models/efficientnet/test_modeling_efficientnet.py b/tests/models/efficientnet/test_modeling_efficientnet.py new file mode 100644 index 000000000000..687cb98c5349 --- /dev/null +++ b/tests/models/efficientnet/test_modeling_efficientnet.py @@ -0,0 +1,257 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Testing suite for the PyTorch EfficientNet model. 
""" + + +import inspect +import unittest + +from transformers import EfficientNetConfig +from transformers.testing_utils import require_torch, require_vision, slow, torch_device +from transformers.utils import cached_property, is_torch_available, is_vision_available + +from ...test_configuration_common import ConfigTester +from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor + + +if is_torch_available(): + import torch + + from transformers import EfficientNetForImageClassification, EfficientNetModel + from transformers.models.efficientnet.modeling_efficientnet import EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST + + +if is_vision_available(): + from PIL import Image + + from transformers import AutoImageProcessor + + +class EfficientNetModelTester: + def __init__( + self, + parent, + batch_size=13, + image_size=32, + num_channels=3, + kernel_sizes=[3, 3, 5], + in_channels=[32, 16, 24], + out_channels=[16, 24, 40], + strides=[1, 1, 2], + num_block_repeats=[1, 1, 2], + expand_ratios=[1, 6, 6], + is_training=True, + use_labels=True, + intermediate_size=37, + hidden_act="gelu", + num_labels=10, + ): + self.parent = parent + self.batch_size = batch_size + self.image_size = image_size + self.num_channels = num_channels + self.kernel_sizes = kernel_sizes + self.in_channels = in_channels + self.out_channels = out_channels + self.strides = strides + self.num_block_repeats = num_block_repeats + self.expand_ratios = expand_ratios + self.is_training = is_training + self.hidden_act = hidden_act + self.num_labels = num_labels + self.use_labels = use_labels + + def prepare_config_and_inputs(self): + pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) + + labels = None + if self.use_labels: + labels = ids_tensor([self.batch_size], self.num_labels) + + config = self.get_config() + return config, pixel_values, labels + + def get_config(self): + return EfficientNetConfig( + num_channels=self.num_channels, + kernel_sizes=self.kernel_sizes, + in_channels=self.in_channels, + out_channels=self.out_channels, + strides=self.strides, + num_block_repeats=self.num_block_repeats, + expand_ratios=self.expand_ratios, + hidden_act=self.hidden_act, + num_labels=self.num_labels, + ) + + def create_and_check_model(self, config, pixel_values, labels): + model = EfficientNetModel(config=config) + model.to(torch_device) + model.eval() + result = model(pixel_values) + # expected last hidden states: B, C, H // 4, W // 4 + self.parent.assertEqual( + result.last_hidden_state.shape, + (self.batch_size, config.hidden_dim, self.image_size // 4, self.image_size // 4), + ) + + def create_and_check_for_image_classification(self, config, pixel_values, labels): + model = EfficientNetForImageClassification(config) + model.to(torch_device) + model.eval() + result = model(pixel_values, labels=labels) + self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + config, pixel_values, labels = config_and_inputs + inputs_dict = {"pixel_values": pixel_values} + return config, inputs_dict + + +@require_torch +class EfficientNetModelTest(ModelTesterMixin, unittest.TestCase): + """ + Here we also overwrite some of the tests of test_modeling_common.py, as EfficientNet does not use input_ids, inputs_embeds, + attention_mask and seq_length. 
+ """ + + all_model_classes = (EfficientNetModel, EfficientNetForImageClassification) if is_torch_available() else () + + fx_compatible = False + test_pruning = False + test_resize_embeddings = False + test_head_masking = False + has_attentions = False + + def setUp(self): + self.model_tester = EfficientNetModelTester(self) + self.config_tester = ConfigTester( + self, config_class=EfficientNetConfig, has_text_modality=False, hidden_size=37 + ) + + def test_config(self): + self.create_and_test_config_common_properties() + self.config_tester.create_and_test_config_to_json_string() + self.config_tester.create_and_test_config_to_json_file() + self.config_tester.create_and_test_config_from_and_save_pretrained() + self.config_tester.create_and_test_config_with_num_labels() + self.config_tester.check_config_can_be_init_without_params() + self.config_tester.check_config_arguments_init() + + def create_and_test_config_common_properties(self): + return + + @unittest.skip(reason="EfficientNet does not use inputs_embeds") + def test_inputs_embeds(self): + pass + + @unittest.skip(reason="EfficientNet does not support input and output embeddings") + def test_model_common_attributes(self): + pass + + @unittest.skip(reason="EfficientNet does not use feedforward chunking") + def test_feed_forward_chunking(self): + pass + + def test_forward_signature(self): + config, _ = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + signature = inspect.signature(model.forward) + # signature.parameters is an OrderedDict => so arg_names order is deterministic + arg_names = [*signature.parameters.keys()] + + expected_arg_names = ["pixel_values"] + self.assertListEqual(arg_names[:1], expected_arg_names) + + def test_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_model(*config_and_inputs) + + def test_hidden_states_output(self): + def check_hidden_states_output(inputs_dict, config, model_class): + model = model_class(config) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + + hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states + num_blocks = sum(config.num_block_repeats) * 4 + self.assertEqual(len(hidden_states), num_blocks) + + # EfficientNet's feature maps are of shape (batch_size, num_channels, height, width) + self.assertListEqual( + list(hidden_states[0].shape[-2:]), + [self.model_tester.image_size // 2, self.model_tester.image_size // 2], + ) + + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + inputs_dict["output_hidden_states"] = True + check_hidden_states_output(inputs_dict, config, model_class) + + # check that output_hidden_states also work using config + del inputs_dict["output_hidden_states"] + config.output_hidden_states = True + + check_hidden_states_output(inputs_dict, config, model_class) + + def test_for_image_classification(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_for_image_classification(*config_and_inputs) + + @slow + def test_model_from_pretrained(self): + for model_name in EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: + model = EfficientNetModel.from_pretrained(model_name) + self.assertIsNotNone(model) + + +# We will verify our results on an image of cute cats 
+def prepare_img():
+    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
+    return image
+
+
+@require_torch
+@require_vision
+class EfficientNetModelIntegrationTest(unittest.TestCase):
+    @cached_property
+    def default_image_processor(self):
+        return AutoImageProcessor.from_pretrained("google/efficientnet-b7") if is_vision_available() else None
+
+    @slow
+    def test_inference_image_classification_head(self):
+        model = EfficientNetForImageClassification.from_pretrained("google/efficientnet-b7").to(torch_device)
+
+        image_processor = self.default_image_processor
+        image = prepare_img()
+        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
+
+        # forward pass
+        with torch.no_grad():
+            outputs = model(**inputs)
+
+        # verify the logits
+        expected_shape = torch.Size((1, 1000))
+        self.assertEqual(outputs.logits.shape, expected_shape)
+
+        expected_slice = torch.tensor([0.0001, 0.0002, 0.0002]).to(torch_device)
+        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
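
A minimal way to sanity-check the new model by hand, mirroring the slow integration test above (a sketch, not part of this patch; it assumes network access to download the google/efficientnet-b7 checkpoint and reuses the repo's COCO cats fixture):

    import torch
    from PIL import Image

    from transformers import AutoImageProcessor, EfficientNetForImageClassification

    processor = AutoImageProcessor.from_pretrained("google/efficientnet-b7")
    model = EfficientNetForImageClassification.from_pretrained("google/efficientnet-b7")

    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(images=image, return_tensors="pt")

    # Single forward pass; logits have shape (1, 1000) over the ImageNet-1k classes
    with torch.no_grad():
        logits = model(**inputs).logits

    print(model.config.id2label[logits.argmax(-1).item()])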