From 602bb6b8a19573b43015efb1987208dff1837b1b Mon Sep 17 00:00:00 2001 From: IvanHCenalmor Date: Wed, 8 Nov 2023 15:53:03 +0000 Subject: [PATCH] Avoid /content in new Colab_notebooks --- Colab_notebooks/3D_RCAN_ZeroCostDL4Mic.ipynb | 37 ++++---- Colab_notebooks/DFCAN_ZeroCostDL4Mic.ipynb | 13 +-- .../DRMIME_2D_ZeroCostDL4Mic.ipynb | 2 +- .../DecoNoising_2D_ZeroCostDL4Mic.ipynb | 26 +++--- Colab_notebooks/DenoiSeg_ZeroCostDL4Mic.ipynb | 25 +++--- .../Detectron2_2D_ZeroCostDL4Mic.ipynb | 2 +- .../EmbedSeg_2D_ZeroCostDL4Mic.ipynb | 18 ++-- Colab_notebooks/MaskRCNN_ZeroCostDL4Mic.ipynb | 2 +- .../RetinaNet_ZeroCostDL4Mic.ipynb | 90 ++++++++++--------- .../SplineDist_2D_ZeroCostDL4Mic.ipynb | 17 ++-- Colab_notebooks/WGAN_ZeroCostDL4Mic.ipynb | 17 ++-- 11 files changed, 133 insertions(+), 116 deletions(-) diff --git a/Colab_notebooks/3D_RCAN_ZeroCostDL4Mic.ipynb b/Colab_notebooks/3D_RCAN_ZeroCostDL4Mic.ipynb index 74e9bcbc..25b06282 100644 --- a/Colab_notebooks/3D_RCAN_ZeroCostDL4Mic.ipynb +++ b/Colab_notebooks/3D_RCAN_ZeroCostDL4Mic.ipynb @@ -163,7 +163,7 @@ "from builtins import any as b_any\n", "\n", "def get_requirements_path():\n", - " # Store requirements file in 'contents' directory\n", + " # Store requirements file in 'base_path' directory\n", " current_dir = os.getcwd()\n", " dir_count = current_dir.count('/') - 1\n", " path = '../' * (dir_count) + 'requirements.txt'\n", @@ -215,6 +215,9 @@ "import os\n", "import pandas as pd\n", "\n", + "#Create a variable to get and store relative base path\n", + "base_path = os.getcwd()\n", + "\n", "!pip uninstall -y keras-nightly\n", "\n", "\n", @@ -520,8 +523,8 @@ " pdf.ln(1)\n", " pdf.cell(60, 5, txt = 'Example Training pair', ln=1)\n", " pdf.ln(1)\n", - " exp_size = io.imread('/content/TrainingDataExample_3D_RCAN.png').shape\n", - " pdf.image('/content/TrainingDataExample_3D_RCAN.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))\n", + " exp_size = io.imread(base_path + '/TrainingDataExample_3D_RCAN.png').shape\n", + " pdf.image(base_path + '/TrainingDataExample_3D_RCAN.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))\n", " pdf.ln(1)\n", " ref_1 = 'References:\\n - ZeroCostDL4Mic: von Chamier, Lucas & Laine, Romain, et al. 
\"Democratising deep learning for microscopy with ZeroCostDL4Mic.\" Nature Communications (2021).'\n", " pdf.multi_cell(190, 5, txt = ref_1, align='L')\n", @@ -538,7 +541,7 @@ " if trained:\n", " pdf.output(model_path+'/'+model_name+'/'+model_name+\"_training_report.pdf\")\n", " else:\n", - " pdf.output('/content/'+model_name+\"_training_report.pdf\")\n", + " pdf.output(base_path + '/'+model_name+\"_training_report.pdf\")\n", "\n", "\n", "def qc_pdf_export():\n", @@ -758,7 +761,7 @@ "\n", "# mount user's Google Drive to Google Colab.\n", "from google.colab import drive\n", - "drive.mount('/content/gdrive')" + "drive.mount(base_path + '/gdrive')" ] }, { @@ -846,7 +849,7 @@ "#@markdown ###Path to training images:\n", "\n", "# base folder of GT and low images\n", - "base = \"/content\"\n", + "base = base_path + \"\"\n", "\n", "# low SNR images\n", "Training_source = \"\" #@param {type:\"string\"}\n", @@ -944,26 +947,26 @@ "File_for_validation = int((number_files)/percentage_validation)+1\n", "\n", "#Here we split the training dataset between training and validation\n", - "# Everything is copied in the /Content Folder\n", + "# Everything is copied in the \"base_path\" Folder\n", "\n", - "Training_source_temp = \"/content/training_source\"\n", + "Training_source_temp = base_path + \"/training_source\"\n", "\n", "if os.path.exists(Training_source_temp):\n", " shutil.rmtree(Training_source_temp)\n", "os.makedirs(Training_source_temp)\n", "\n", - "Training_target_temp = \"/content/training_target\"\n", + "Training_target_temp = base_path + \"/training_target\"\n", "if os.path.exists(Training_target_temp):\n", " shutil.rmtree(Training_target_temp)\n", "os.makedirs(Training_target_temp)\n", "\n", - "Validation_source_temp = \"/content/validation_source\"\n", + "Validation_source_temp = base_path + \"/validation_source\"\n", "\n", "if os.path.exists(Validation_source_temp):\n", " shutil.rmtree(Validation_source_temp)\n", "os.makedirs(Validation_source_temp)\n", "\n", - "Validation_target_temp = \"/content/validation_target\"\n", + "Validation_target_temp = base_path + \"/validation_target\"\n", "if os.path.exists(Validation_target_temp):\n", " shutil.rmtree(Validation_target_temp)\n", "os.makedirs(Validation_target_temp)\n", @@ -1007,7 +1010,7 @@ "plt.imshow(y[mid_plane], norm=simple_norm(y[mid_plane], percent = 99), interpolation='nearest')\n", "plt.axis('off')\n", "plt.title('High SNR image (single Z plane)');\n", - "plt.savefig('/content/TrainingDataExample_3D_RCAN.png',bbox_inches='tight',pad_inches=0)" + "plt.savefig(base_path + '/TrainingDataExample_3D_RCAN.png',bbox_inches='tight',pad_inches=0)" ] }, { @@ -1063,7 +1066,7 @@ "\n", "\n", "if not Save_augmented_images:\n", - " Saving_path= \"/content\"\n", + " Saving_path= base_path + \"\"\n", "\n", "\n", "def rotation_aug(Source_path, Target_path, flip=False):\n", @@ -1235,7 +1238,7 @@ " \n", "json_object = json.dumps(dictionary, indent = 4) \n", " \n", - "with open(\"/content/config.json\", \"w\") as outfile: \n", + "with open(base_path + \"/config.json\", \"w\") as outfile: \n", " outfile.write(json_object)\n", "\n", "# Export pdf summary of training parameters\n", @@ -1275,7 +1278,7 @@ "start = time.time()\n", "\n", "# Start Training\n", - "!python /content/3D-RCAN/train.py -c /content/config.json -o \"$full_model_path\"\n", + "!python \"$base_path\"/3D-RCAN/train.py -c \"$base_path\"/config.json -o \"$full_model_path\"\n", "\n", "print(\"Training, done.\")\n", "\n", @@ -1449,7 +1452,7 @@ "\n", "print(\"Restoring images...\")\n", "\n", - 
"!python /content/3D-RCAN/apply.py -m \"$full_QC_model_path\" -i \"$Source_QC_folder\" -o \"$path_QC_prediction\"\n", + "!python \"$base_path\"/3D-RCAN/apply.py -m \"$full_QC_model_path\" -i \"$Source_QC_folder\" -o \"$path_QC_prediction\"\n", "\n", "print(\"Done...\")\n", "\n", @@ -1836,7 +1839,7 @@ "\n", "print(\"Restoring images...\")\n", "\n", - "!python /content/3D-RCAN/apply.py -m \"$full_Prediction_model_path\" -i \"$Data_folder\" -o \"$Result_folder\"\n", + "!python \"$base_path\"/3D-RCAN/apply.py -m \"$full_Prediction_model_path\" -i \"$Data_folder\" -o \"$Result_folder\"\n", "\n", "print(\"Images saved into the result folder:\", Result_folder)\n", "\n", diff --git a/Colab_notebooks/DFCAN_ZeroCostDL4Mic.ipynb b/Colab_notebooks/DFCAN_ZeroCostDL4Mic.ipynb index 1bbbf83c..f0689ea8 100644 --- a/Colab_notebooks/DFCAN_ZeroCostDL4Mic.ipynb +++ b/Colab_notebooks/DFCAN_ZeroCostDL4Mic.ipynb @@ -227,6 +227,9 @@ "import warnings\n", "warnings.filterwarnings('ignore')\n", "\n", + "#Create a variable to get and store relative base path\n", + "base_path = os.getcwd()\n", + "\n", "# -------------- Other definitions -----------\n", "W = '\\033[0m' # white (normal)\n", "R = '\\033[31m' # red\n", @@ -1025,8 +1028,8 @@ " pdf.ln(1)\n", " pdf.cell(60, 5, txt = 'Example Training pair', ln=1)\n", " pdf.ln(1)\n", - " exp_size = io.imread(\"/content/ExampleData.png\").shape\n", - " pdf.image(\"/content/ExampleData.png\", x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))\n", + " exp_size = io.imread(base_path + \"/ExampleData.png\").shape\n", + " pdf.image(base_path + \"/ExampleData.png\", x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))\n", " pdf.ln(1)\n", " ref_1 = 'References:\\n - ZeroCostDL4Mic: von Chamier, Lucas & Laine, Romain, et al. 
\"ZeroCostDL4Mic: an open platform to simplify access and use of Deep-Learning in Microscopy.\" BioRxiv (2020).'\n", " pdf.multi_cell(190, 5, txt = ref_1, align='L')\n", @@ -1322,7 +1325,7 @@ "#mounts user's Google Drive to Google Colab.\n", "\n", "from google.colab import drive\n", - "drive.mount('/content/gdrive')\n", + "drive.mount(base_path + '/gdrive')\n", "\n", "\n" ] @@ -1611,7 +1614,7 @@ "\n", " if pretrained_model_choice == \"Model_name\":\n", " pretrained_model_name = \"Model_name\"\n", - " pretrained_model_path = \"/content/\"+pretrained_model_name\n", + " pretrained_model_path = base_path + \"/\"+pretrained_model_name\n", " print(\"Downloading the 2D_Demo_Model_from_Stardist_2D_paper\")\n", " if os.path.exists(pretrained_model_path):\n", " shutil.rmtree(pretrained_model_path)\n", @@ -1730,7 +1733,7 @@ "plt.subplot(1, 2, 2)\n", "plt.imshow( train_patches_gt[0], 'gray' )\n", "plt.title( 'Training patch at full resolution' )\n", - "plt.savefig('/content/ExampleData.png', bbox_inches='tight', pad_inches=0)\n", + "plt.savefig(base_path + '/ExampleData.png', bbox_inches='tight', pad_inches=0)\n", "\n", "# Prepare the training data and create data generators\n", "# training input\n", diff --git a/Colab_notebooks/DRMIME_2D_ZeroCostDL4Mic.ipynb b/Colab_notebooks/DRMIME_2D_ZeroCostDL4Mic.ipynb index abb55e7a..ed5519db 100644 --- a/Colab_notebooks/DRMIME_2D_ZeroCostDL4Mic.ipynb +++ b/Colab_notebooks/DRMIME_2D_ZeroCostDL4Mic.ipynb @@ -1 +1 @@ -{"cells":[{"cell_type":"markdown","metadata":{"id":"IkSguVy8Xv83"},"source":["# **DRMIME (2D)**\n","\n","---\n","\n"," DRMIME is a self-supervised deep-learning method that can be used to register 2D images.\n","\n"," **This particular notebook enables self-supervised registration of 2D dataset.**\n","\n","---\n","\n","*Disclaimer*:\n","\n","This notebook is part of the Zero-Cost Deep-Learning to Enhance Microscopy project (https://github.com/HenriquesLab/DeepLearning_Collab/wiki). Jointly developed by the Jacquemet (link to https://cellmig.org/) and Henriques (https://henriqueslab.github.io/) laboratories. \n","\n","\n","While this notebook is part of the Zero-Cost Deep-Learning to Enhance Microscopy project (ZeroCostDL4Mic), this notebook structure substantially deviates from other ZeroCostDL4Mic notebooks and our template. This is because the deep learning method employed here is used to improve the image registration process. No Deep Learning models are actually saved, only the registered images. 
\n","\n","\n","This notebook is largely based on the following paper:\n","\n","DRMIME: Differentiable Mutual Information and Matrix Exponential for Multi-Resolution Image Registration by Abhishek Nan\n"," *et al.* published on arXiv in 2020 (https://arxiv.org/abs/2001.09865)\n","\n","And source code found in: https://github.com/abnan/DRMIME\n","\n","**Please also cite this original paper when using or developing this notebook.**\n"]},{"cell_type":"markdown","metadata":{"id":"jWAz2i7RdxUV"},"source":["# **How to use this notebook?**\n","\n","---\n","\n","Video describing how to use our notebooks are available on youtube:\n"," - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run through of the workflow to obtain the notebooks and the provided test datasets as well as a common use of the notebook\n"," - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook\n","\n","\n","---\n","###**Structure of a notebook**\n","\n","The notebook contains two types of cell: \n","\n","**Text cells** provide information and can be modified by douple-clicking the cell. You are currently reading the text cell. You can create a new text by clicking `+ Text`.\n","\n","**Code cells** contain code and the code can be modfied by selecting the cell. To execute the cell, move your cursor on the `[ ]`-mark on the left side of the cell (play button appears). Click to execute the cell. After execution is done the animation of play button stops. You can create a new coding cell by clicking `+ Code`.\n","\n","---\n","###**Table of contents, Code snippets** and **Files**\n","\n","On the top left side of the notebook you find three tabs which contain from top to bottom:\n","\n","*Table of contents* = contains structure of the notebook. Click the content to move quickly between sections.\n","\n","*Code snippets* = contain examples how to code certain tasks. You can ignore this when using this notebook.\n","\n","*Files* = contain all available files. After mounting your google drive (see section 1.) you will find your files and folders here. \n","\n","**Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive-button; your Google Drive is connected in section 1.2.\n","\n","**Note:** The \"sample data\" in \"Files\" contains default files. Do not upload anything in here!\n","\n","---\n","###**Making changes to the notebook**\n","\n","**You can make a copy** of the notebook and save it to your Google Drive. To do this click file -> save a copy in drive.\n","\n","To **edit a cell**, double click on the text. This will show you either the source code (in code cells) or the source text (in text cells).\n","You can use the `#`-mark in code cells to comment out parts of the code. This allows you to keep the original code piece in the cell as a comment."]},{"cell_type":"markdown","metadata":{"id":"gKDLkLWUd-YX"},"source":["# **0. Before getting started**\n","---\n","\n","Before you run the notebook, please ensure that you are logged into your Google account and have the training and/or data to process in your Google Drive.\n","\n","For DRMIME to train, it requires at least two images. One **`\"Fixed image\"`** (template for the registration) and one **`Moving Image`** (image to be registered). Multiple **`Moving Images`** can also be provided if you want to register them to the same **`\"Fixed image\"`**. 
If you provide several **`Moving Images`**, multiple DRMIME instances will run one after another. \n","\n","The registration can also be applied to other channels. If you wish to apply the registration to other channels, please provide the images in another folder and carefully check your file names. Additional channels need to have the same name as the registered images and a prefix indicating the channel number starting at \"C1_\". See the example below. \n","\n","Here is a common data structure that can work:\n","\n","* Data\n"," \n"," - **Fixed_image_folder**\n"," - img_1.tif (image used as template for the registration)\n"," - **Moving_image_folder**\n"," - img_3.tif, img_4.tif, ... (images to be registered) \n"," - **Folder_containing_additional_channels** (optional, if you want to apply the registration to other channel(s))\n"," - C1_img_3.tif, C1_img_4.tif, ...\n"," - C2_img_3.tif, C2_img_4.tif, ...\n"," - C3_img_3.tif, C3_img_4.tif, ...\n"," - **Results**\n","\n","The **Results** folder will contain the processed images and PDF reports. Your original images remain unmodified.\n","\n","---\n","\n"]},{"cell_type":"markdown","metadata":{"id":"n4yWFoJNnoin"},"source":["# **1. Install DRMIME and dependencies**\n","---"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"fq21zJVFNASx"},"outputs":[],"source":["Notebook_version = '1.13'\n","Network = 'DRMIME'\n","\n","\n","from builtins import any as b_any\n","\n","def get_requirements_path():\n"," # Store requirements file in 'contents' directory\n"," current_dir = os.getcwd()\n"," dir_count = current_dir.count('/') - 1\n"," path = '../' * (dir_count) + 'requirements.txt'\n"," return path\n","\n","def filter_files(file_list, filter_list):\n"," filtered_list = []\n"," for fname in file_list:\n"," if b_any(fname.split('==')[0] in s for s in filter_list):\n"," filtered_list.append(fname)\n"," return filtered_list\n","\n","def build_requirements_file(before, after):\n"," path = get_requirements_path()\n","\n"," # Exporting requirements.txt for local run\n"," !pip freeze > $path\n","\n"," # Get minimum requirements file\n"," df = pd.read_csv(path)\n"," mod_list = [m.split('.')[0] for m in after if not m in before]\n"," req_list_temp = df.values.tolist()\n"," req_list = [x[0] for x in req_list_temp]\n","\n"," # Replace with package name and handle cases where import name is different to module name\n"," mod_name_list = [['sklearn', 'scikit-learn'], ['skimage', 'scikit-image']]\n"," mod_replace_list = [[x[1] for x in mod_name_list] if s in [x[0] for x in mod_name_list] else s for s in mod_list]\n"," filtered_list = filter_files(req_list, mod_replace_list)\n","\n"," file=open(path,'w')\n"," for item in filtered_list:\n"," file.writelines(item)\n","\n"," file.close()\n","\n","import sys\n","before = [str(m) for m in sys.modules]\n","\n","#@markdown ##Install DRMIME and dependencies\n","\n","# Here we install DRMIME and other required packages\n","\n","!pip install wget\n","\n","from skimage import io\n","import numpy as np\n","import math\n","import matplotlib.pyplot as plt\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","from torch.autograd import Variable\n","import torch.optim as optim\n","from skimage.transform import pyramid_gaussian\n","from skimage.filters import gaussian\n","from skimage.filters import threshold_otsu\n","from skimage.filters import sobel\n","from skimage.color import rgb2gray\n","from skimage import feature\n","from torch.autograd import Function\n","import 
cv2\n","from IPython.display import clear_output\n","import pandas as pd\n","from skimage.io import imsave\n","\n","device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n","\n","\n","# ------- Common variable to all ZeroCostDL4Mic notebooks -------\n","\n","import urllib\n","import os, random\n","import shutil \n","import zipfile\n","from tifffile import imread, imsave\n","import time\n","import sys\n","import wget\n","from pathlib import Path\n","import pandas as pd\n","import csv\n","from glob import glob\n","from scipy import signal\n","from scipy import ndimage\n","from skimage import io\n","from sklearn.linear_model import LinearRegression\n","from skimage.util import img_as_uint\n","import matplotlib as mpl\n","from skimage.metrics import structural_similarity\n","from skimage.metrics import peak_signal_noise_ratio as psnr\n","from astropy.visualization import simple_norm\n","from skimage import img_as_float32\n","\n","# Colors for the warning messages\n","class bcolors:\n"," WARNING = '\\033[31m'\n","W = '\\033[0m' # white (normal)\n","R = '\\033[31m' # red\n","\n","#Disable some of the tensorflow warnings\n","import warnings\n","warnings.filterwarnings(\"ignore\")\n","\n","\n","# Check if this is the latest version of the notebook\n","All_notebook_versions = pd.read_csv(\"https://raw.githubusercontent.com/HenriquesLab/ZeroCostDL4Mic/master/Colab_notebooks/Latest_Notebook_versions.csv\", dtype=str)\n","print('Notebook version: '+Notebook_version)\n","Latest_Notebook_version = All_notebook_versions[All_notebook_versions[\"Notebook\"] == Network]['Version'].iloc[0]\n","print('Latest notebook version: '+Latest_Notebook_version)\n","if Notebook_version == Latest_Notebook_version:\n"," print(\"This notebook is up-to-date.\")\n","else:\n"," print(bcolors.WARNING +\"A new version of this notebook has been released. We recommend that you download it at https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki\")\n","\n","\n","\n","!pip freeze > requirements.txt\n","\n","\n","print(\"Libraries installed\")\n","\n","# Build requirements file for local run\n","after = [str(m) for m in sys.modules]\n","build_requirements_file(before, after)"]},{"cell_type":"markdown","metadata":{"id":"cbTknRcviyT7"},"source":["# **2. Initialise the Colab session**\n","\n","\n","\n","\n","---\n","\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"DMNHVZfHmbKb"},"source":["## **2.1. Check for GPU access**\n","---\n","\n","By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:\n","\n","Go to **Runtime -> Change the Runtime type**\n","\n","**Runtime type: Python 3** *(Python 3 is programming language in which this program is written)*\n","\n","**Accelerator: GPU** *(Graphics processing unit)*\n"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"h5i5CS2bSmZr"},"outputs":[],"source":["#@markdown ##Run this cell to check if you have GPU access\n","#%tensorflow_version 1.x\n","\n","\n","import tensorflow as tf\n","if tf.test.gpu_device_name()=='':\n"," print('You do not have GPU access.') \n"," print('Did you change your runtime ?') \n"," print('If the runtime setting is correct then Google did not allocate a GPU for your session')\n"," print('Expect slow performance. To access GPU try reconnecting later')\n","\n","else:\n"," print('You have GPU access')\n"," !nvidia-smi"]},{"cell_type":"markdown","metadata":{"id":"n3B3meGTbYVi"},"source":["## **2.2. 
Mount your Google Drive**\n","---\n"," To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook.\n","\n"," Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste into the cell and press enter. This will give Colab access to the data on the drive. \n","\n"," Once this is done, your data are available in the **Files** tab on the top left of notebook."]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"01Djr8v-5pPk"},"outputs":[],"source":["#@markdown ##Play the cell to connect your Google Drive to Colab\n","\n","#@markdown * Click on the URL. \n","\n","#@markdown * Sign in your Google Account. \n","\n","#@markdown * Copy the authorization code. \n","\n","#@markdown * Enter the authorization code. \n","\n","#@markdown * Click on \"Files\" site on the right. Refresh the site. Your Google Drive folder should now be available here as \"drive\". \n","\n","# mount user's Google Drive to Google Colab.\n","from google.colab import drive\n","drive.mount('/content/gdrive')"]},{"cell_type":"markdown","metadata":{"id":"HLYcZR9gMv42"},"source":["# **3. Select your parameters and paths**\n","---"]},{"cell_type":"markdown","metadata":{"id":"Kbn9_JdqnNnK"},"source":["## **3.1. Setting main training parameters**\n","---\n"," "]},{"cell_type":"markdown","metadata":{"id":"CB6acvUFtWqd"},"source":[" **Paths for training, predictions and results**\n","These is the path to your folders containing the image you want to register. To find the path of the folder containing your datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below.\n","\n","**`Fixed_image_folder`:** This is the folder containing your \"Fixed image\".\n","\n","**`Moving_image_folder`:** This is the folder containing your \"Moving Image(s)\".\n","\n","**`Result_folder`:** This is the folder where your results will be saved.\n","\n","\n","**Training Parameters**\n","\n","**`model_name`:** Choose a name for your model.\n","\n","**`number_of_iteration`:** Input how many iteration (rounds) the network will be trained. Preliminary results can already be observed after a 200 iterations, but a full training should run for 500-1000 iterations. **Default value: 500**\n","\n","**`Registration_mode`:** Choose which registration method you would like to use.\n","\n","**Additional channels**\n","\n"," This option enable you to apply the registration to other images (for instance other channels). Place these images in the **`Additional_channels_folder`**. Additional channels need to have the same name as the images you want to register (found in **`Moving_image_folder`**) and a prefix indicating the channel number starting at \"C1_\".\n","\n"," \n","**Advanced Parameters - experienced users only**\n","\n","**`n_neurons`:** Number of neurons (elementary constituents) that will assemble your model. **Default value: 100**.\n","\n","**`mine_initial_learning_rate`:** Input the initial value to be used as learning rate for MINE. **Default value: 0.001**\n","**`homography_net_vL_initial_learning_rate`:** Input the initial value to be used as learning rate for homography_net_vL. **Default value: 0.001**\n","\n","**`homography_net_v1_initial_learning_rate`:** Input the initial value to be used as learning rate for homography_net_v1. 
**Default value: 0.0001**\n"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"ewpNJ_I0Mv47"},"outputs":[],"source":["\n","#@markdown ###Path to the Fixed and Moving image folders: \n","Fixed_image_folder = \"\" #@param {type:\"string\"}\n","\n","\n","import os.path\n","from os import path\n","\n","if path.isfile(Fixed_image_folder):\n"," I = imread(Fixed_image_folder).astype(np.float32) # fixed image\n","\n","if path.isdir(Fixed_image_folder):\n"," Fixed_image = os.listdir(Fixed_image_folder)\n"," I = imread(Fixed_image_folder+\"/\"+Fixed_image[0]).astype(np.float32) # fixed image\n","\n","\n","Moving_image_folder = \"\" #@param {type:\"string\"}\n","\n","#@markdown ### Provide the path to the folder where the predictions are to be saved\n","Result_folder = \"\" #@param {type:\"string\"}\n","\n","\n","#@markdown ###Training Parameters\n","model_name = \"\" #@param {type:\"string\"}\n","\n","number_of_iteration = 500#@param {type:\"number\"}\n","\n","Registration_mode = \"Affine\" #@param [\"Affine\", \"Perspective\"]\n","\n","\n","#@markdown ###Do you want to apply the registration to other channel(s)?\n","Apply_registration_to_other_channels = False#@param {type:\"boolean\"}\n","\n","Additional_channels_folder = \"\" #@param {type:\"string\"}\n","\n","#@markdown ###Advanced Parameters\n","\n","Use_Default_Advanced_Parameters = True#@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please input:\n","\n","n_neurons = 100 #@param {type:\"number\"}\n","mine_initial_learning_rate = 0.001 #@param {type:\"number\"}\n","homography_net_vL_initial_learning_rate = 0.001 #@param {type:\"number\"}\n","homography_net_v1_initial_learning_rate = 0.0001 #@param {type:\"number\"}\n","\n","if (Use_Default_Advanced_Parameters): \n"," print(\"Default advanced parameters enabled\") \n"," n_neurons = 100\n"," mine_initial_learning_rate = 0.001\n"," homography_net_vL_initial_learning_rate = 0.001\n"," homography_net_v1_initial_learning_rate = 0.0001\n","\n","\n","#failsafe for downscale could be useful \n","#to be added\n","\n","\n","#Load a random moving image to visualise and test the settings\n","random_choice = random.choice(os.listdir(Moving_image_folder))\n","J = imread(Moving_image_folder+\"/\"+random_choice).astype(np.float32)\n","\n","# Check if additional channel(s) need to be registered and if so how many\n","\n","print(str(len(os.listdir(Moving_image_folder)))+\" image(s) will be registered.\")\n","\n","if Apply_registration_to_other_channels:\n","\n"," other_channel_images = os.listdir(Additional_channels_folder)\n"," Number_of_other_channels = len(other_channel_images)/len(os.listdir(Moving_image_folder))\n","\n"," if Number_of_other_channels.is_integer():\n"," print(\"The registration(s) will be propagated to \"+str(Number_of_other_channels)+\" other channel(s)\")\n"," else:\n"," print(bcolors.WARNING +\"!! WARNING: Incorrect number of images in Folder_containing_additional_channels\"+W)\n","\n","#here we check that no model with the same name already exist, if so print a warning\n","if os.path.exists(Result_folder+'/'+model_name):\n"," print(bcolors.WARNING +\"!! 
WARNING: \"+model_name+\" already exists and will be deleted in the following cell !!\")\n"," print(bcolors.WARNING +\"To continue training \"+model_name+\", choose a new model_name here, and load \"+model_name+\" in section 3.3\"+W)\n"," \n","\n","print(\"Example of two images to be registered\")\n","\n","#Here we display one image\n","f=plt.figure(figsize=(10,10))\n","plt.subplot(1,2,1)\n","plt.imshow(I, norm=simple_norm(I, percent = 99), interpolation='nearest')\n","\n","\n","plt.title('Fixed image')\n","plt.axis('off');\n","\n","plt.subplot(1,2,2)\n","plt.imshow(J, norm=simple_norm(J, percent = 99), interpolation='nearest')\n","plt.title('Moving image')\n","plt.axis('off');\n","plt.savefig('/content/TrainingDataExample_DRMIME2D.png',bbox_inches='tight',pad_inches=0)\n","plt.show()\n","\n"]},{"cell_type":"markdown","metadata":{"id":"QpKgUER3y9tn"},"source":["## **3.2. Choose and test the image pre-processing settings**\n","---\n"," DRMIME makes use of multi-resolution image pyramids to perform registration. Unlike a conventional method where computation starts at the highest level of the image pyramid and gradually proceeds to the lower levels, DRMIME simultaneously use all the levels in gradient descent-based optimization using automatic differentiation. Here, you can choose the parameters that define the multi-resolution image pyramids that will be used.\n","\n","**`nb_images_pyramid`:** Choose the number of images to use to assemble the pyramid. **Default value: 10**.\n","\n","**`Level_downscaling`:** Choose the level of downscaling that will be used to create the images of the pyramid **Default value: 1.8**.\n","\n","**`sampling`:** amount of sampling used for the perspective registration. **Default value: 0.1**.\n","\n"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"MoNXLwG6yd76"},"outputs":[],"source":["\n","#@markdown ##Image pre-processing settings\n","\n","nb_images_pyramid = 10#@param {type:\"number\"} # where registration starts (at the coarsest resolution)\n","\n","L = nb_images_pyramid\n","\n","Level_downscaling = 1.8#@param {type:\"number\"}\n","\n","downscale = Level_downscaling\n","\n","sampling = 0.1#@param {type:\"number\"} # 10% sampling used only for perspective registration\n","\n","\n","ifplot=True\n","if np.ndim(I) == 3:\n"," nChannel=I.shape[2]\n"," pyramid_I = tuple(pyramid_gaussian(gaussian(I, sigma=1, multichannel=True), downscale=downscale, multichannel=True))\n"," pyramid_J = tuple(pyramid_gaussian(gaussian(J, sigma=1, multichannel=True), downscale=downscale, multichannel=True))\n","elif np.ndim(I) == 2:\n"," nChannel=1\n"," pyramid_I = tuple(pyramid_gaussian(gaussian(I, sigma=1, multichannel=False), downscale=downscale, multichannel=False))\n"," pyramid_J = tuple(pyramid_gaussian(gaussian(J, sigma=1, multichannel=False), downscale=downscale, multichannel=False))\n","else:\n"," print(\"Unknown rank for an image\")\n","\n","\n","# Control the display\n","width=5\n","height=5\n","rows = int(L/5)+1\n","cols = 5\n","axes=[]\n","fig=plt.figure(figsize=(16,16))\n","\n","if Registration_mode == \"Affine\":\n","\n"," print(\"Affine registration selected\")\n","\n","# create a list of necessary objects you will need and commit to GPU\n"," I_lst,J_lst,h_lst,w_lst,xy_lst,ind_lst=[],[],[],[],[],[]\n"," for s in range(L):\n"," I_ = torch.tensor(cv2.normalize(pyramid_I[s].astype(np.float32), None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)).to(device)\n"," J_ = torch.tensor(cv2.normalize(pyramid_J[s].astype(np.float32), 
None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)).to(device)\n","\n"," if nChannel>1:\n"," I_lst.append(I_.permute(2,0,1))\n"," J_lst.append(J_.permute(2,0,1))\n"," h_, w_ = I_lst[s].shape[1], I_lst[s].shape[2]\n","\n"," edges_grayscale = cv2.dilate(cv2.Canny(cv2.GaussianBlur(rgb2gray(pyramid_I[s]),(21,21),0).astype(np.uint8), 0, 30),\n"," np.ones((5,5),np.uint8),\n"," iterations = 1)\n"," ind_ = torch.nonzero(torch.tensor(edges_grayscale).view(h_*w_)).squeeze().to(device)[:1000000]\n"," ind_lst.append(ind_)\n"," else:\n"," I_lst.append(I_)\n"," J_lst.append(J_)\n"," h_, w_ = I_lst[s].shape[0], I_lst[s].shape[1]\n","\n"," edges_grayscale = cv2.dilate(cv2.Canny(cv2.GaussianBlur(rgb2gray(pyramid_I[s]),(21,21),0).astype(np.uint8), 0, 30),\n"," np.ones((5,5),np.uint8),\n"," iterations = 1)\n"," ind_ = torch.nonzero(torch.tensor(edges_grayscale).view(h_*w_)).squeeze().to(device)[:1000000]\n"," ind_lst.append(ind_) \n"," \n"," axes.append( fig.add_subplot(rows, cols, s+1) )\n"," subplot_title=(str(s))\n"," axes[-1].set_title(subplot_title) \n"," plt.imshow(edges_grayscale)\n"," plt.axis('off');\n","\n"," h_lst.append(h_)\n"," w_lst.append(w_)\n","\n"," y_, x_ = torch.meshgrid([torch.arange(0,h_).float().to(device), torch.arange(0,w_).float().to(device)])\n"," y_, x_ = 2.0*y_/(h_-1) - 1.0, 2.0*x_/(w_-1) - 1.0\n"," xy_ = torch.stack([x_,y_],2)\n"," xy_lst.append(xy_)\n","\n"," fig.tight_layout()\n","\n"," plt.show()\n","\n","\n","if Registration_mode == \"Perspective\":\n","\n"," print(\"Perspective registration selected\")\n","\n","# create a list of necessary objects you will need and commit to GPU\n"," I_lst,J_lst,h_lst,w_lst,xy_lst,ind_lst=[],[],[],[],[],[]\n"," for s in range(L):\n"," I_ = torch.tensor(cv2.normalize(pyramid_I[s].astype(np.float32), None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)).to(device)\n"," J_ = torch.tensor(cv2.normalize(pyramid_J[s].astype(np.float32), None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)).to(device)\n"," \n"," if nChannel>1:\n"," I_lst.append(I_.permute(2,0,1))\n"," J_lst.append(J_.permute(2,0,1))\n"," h_, w_ = I_lst[s].shape[1], I_lst[s].shape[2]\n","\n"," ind_ = torch.randperm(int(h_*w_*sampling))\n"," ind_lst.append(ind_)\n"," else:\n"," I_lst.append(I_)\n"," J_lst.append(J_)\n"," h_, w_ = I_lst[s].shape[0], I_lst[s].shape[1]\n","\n"," edges_grayscale = cv2.dilate(cv2.Canny(cv2.GaussianBlur(rgb2gray(pyramid_I[s]),(21,21),0).astype(np.uint8), 0, 10),\n"," np.ones((5,5),np.uint8),\n"," iterations = 1)\n"," ind_ = torch.randperm(int(h_*w_*sampling))\n"," ind_lst.append(ind_) \n"," \n"," axes.append( fig.add_subplot(rows, cols, s+1) )\n"," subplot_title=(str(s))\n"," axes[-1].set_title(subplot_title) \n"," plt.imshow(edges_grayscale)\n"," plt.axis('off');\n","\n"," h_lst.append(h_)\n"," w_lst.append(w_)\n","\n"," y_, x_ = torch.meshgrid([torch.arange(0,h_).float().to(device), torch.arange(0,w_).float().to(device)])\n"," y_, x_ = 2.0*y_/(h_-1) - 1.0, 2.0*x_/(w_-1) - 1.0\n"," xy_ = torch.stack([x_,y_],2)\n"," xy_lst.append(xy_)\n","\n"," fig.tight_layout()\n","\n"," plt.show()\n"]},{"cell_type":"markdown","metadata":{"id":"keIQhCmOMv5S"},"source":["# **4. Train the network**\n","---"]},{"cell_type":"markdown","metadata":{"id":"Ovu0ESxivcxx"},"source":["## **4.1. Prepare for training**\n","---\n","Here, we use the information from 3. 
to load the correct dependencies."]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"t4QTv4vQvbnS"},"outputs":[],"source":["#@markdown ##Load the dependencies required for training\n","\n","print(\"--------------------------------------------------\")\n","\n","# Remove the model name folder if exists\n","\n","if os.path.exists(Result_folder+'/'+model_name):\n"," print(bcolors.WARNING +\"!! WARNING: Model folder already exists and has been removed !!\"+W)\n"," shutil.rmtree(Result_folder+'/'+model_name)\n","os.makedirs(Result_folder+'/'+model_name)\n","\n","\n","\n","if Registration_mode == \"Affine\":\n","\n"," class HomographyNet(nn.Module):\n"," def __init__(self):\n"," super(HomographyNet, self).__init__()\n"," # affine transform basis matrices\n","\n"," self.B = torch.zeros(6,3,3).to(device)\n"," self.B[0,0,2] = 1.0\n"," self.B[1,1,2] = 1.0\n"," self.B[2,0,1] = 1.0\n"," self.B[3,1,0] = 1.0\n"," self.B[4,0,0], self.B[4,1,1] = 1.0, -1.0\n"," self.B[5,1,1], self.B[5,2,2] = -1.0, 1.0\n","\n"," self.v1 = torch.nn.Parameter(torch.zeros(6,1,1).to(device), requires_grad=True)\n"," self.vL = torch.nn.Parameter(torch.zeros(6,1,1).to(device), requires_grad=True)\n","\n"," def forward(self, s):\n"," C = torch.sum(self.B*self.vL,0)\n"," if s==0:\n"," C += torch.sum(self.B*self.v1,0)\n"," A = torch.eye(3).to(device)\n"," H = A\n"," for i in torch.arange(1,10):\n"," A = torch.mm(A/i,C)\n"," H = H + A\n"," return H\n","\n"," class MINE(nn.Module): #https://arxiv.org/abs/1801.04062\n"," def __init__(self):\n"," super(MINE, self).__init__()\n"," self.fc1 = nn.Linear(2*nChannel, n_neurons)\n"," self.fc2 = nn.Linear(n_neurons, n_neurons)\n"," self.fc3 = nn.Linear(n_neurons, 1)\n"," self.bsize = 1 # 1 may be sufficient\n","\n"," def forward(self, x, ind):\n"," x = x.view(x.size()[0]*x.size()[1],x.size()[2])\n"," MI_lb=0.0\n"," for i in range(self.bsize):\n"," ind_perm = ind[torch.randperm(len(ind))]\n"," z1 = self.fc3(F.relu(self.fc2(F.relu(self.fc1(x[ind,:])))))\n"," z2 = self.fc3(F.relu(self.fc2(F.relu(self.fc1(torch.cat((x[ind,0:nChannel],x[ind_perm,nChannel:2*nChannel]),1))))))\n"," MI_lb += torch.mean(z1) - torch.log(torch.mean(torch.exp(z2)))\n","\n"," return MI_lb/self.bsize\n","\n"," def AffineTransform(I, H, xv, yv):\n"," # apply affine transform\n"," xvt = (xv*H[0,0]+yv*H[0,1]+H[0,2])/(xv*H[2,0]+yv*H[2,1]+H[2,2])\n"," yvt = (xv*H[1,0]+yv*H[1,1]+H[1,2])/(xv*H[2,0]+yv*H[2,1]+H[2,2])\n"," J = F.grid_sample(I,torch.stack([xvt,yvt],2).unsqueeze(0)).squeeze()\n"," return J\n","\n","\n"," def multi_resolution_loss():\n"," loss=0.0\n"," for s in np.arange(L-1,-1,-1):\n"," if nChannel>1:\n"," Jw_ = AffineTransform(J_lst[s].unsqueeze(0), homography_net(s), xy_lst[s][:,:,0], xy_lst[s][:,:,1]).squeeze()\n"," mi = mine_net(torch.cat([I_lst[s],Jw_],0).permute(1,2,0),ind_lst[s])\n"," loss = loss - (1./L)*mi\n"," else:\n"," Jw_ = AffineTransform(J_lst[s].unsqueeze(0).unsqueeze(0), homography_net(s), xy_lst[s][:,:,0], xy_lst[s][:,:,1]).squeeze()\n"," mi = mine_net(torch.stack([I_lst[s],Jw_],2),ind_lst[s])\n"," loss = loss - (1./L)*mi\n","\n"," return loss\n","\n","\n","\n","if Registration_mode == \"Perspective\":\n","\n"," class HomographyNet(nn.Module):\n"," def __init__(self):\n"," super(HomographyNet, self).__init__()\n"," # affine transform basis matrices\n","\n"," self.B = torch.zeros(8,3,3).to(device)\n"," self.B[0,0,2] = 1.0\n"," self.B[1,1,2] = 1.0\n"," self.B[2,0,1] = 1.0\n"," self.B[3,1,0] = 1.0\n"," self.B[4,0,0], self.B[4,1,1] = 1.0, -1.0\n"," self.B[5,1,1], 
self.B[5,2,2] = -1.0, 1.0\n"," self.B[6,2,0] = 1.0\n"," self.B[7,2,1] = 1.0\n","\n"," self.v1 = torch.nn.Parameter(torch.zeros(8,1,1).to(device), requires_grad=True)\n"," self.vL = torch.nn.Parameter(torch.zeros(8,1,1).to(device), requires_grad=True)\n","\n"," def forward(self, s):\n"," C = torch.sum(self.B*self.vL,0)\n"," if s==0:\n"," C += torch.sum(self.B*self.v1,0)\n"," A = torch.eye(3).to(device)\n"," H = A\n"," for i in torch.arange(1,10):\n"," A = torch.mm(A/i,C)\n"," H = H + A\n"," return H\n","\n","\n"," class MINE(nn.Module): #https://arxiv.org/abs/1801.04062\n"," def __init__(self):\n"," super(MINE, self).__init__()\n"," self.fc1 = nn.Linear(2*nChannel, n_neurons)\n"," self.fc2 = nn.Linear(n_neurons, n_neurons)\n"," self.fc3 = nn.Linear(n_neurons, 1)\n"," self.bsize = 1 # 1 may be sufficient\n","\n"," def forward(self, x, ind):\n"," x = x.view(x.size()[0]*x.size()[1],x.size()[2])\n"," MI_lb=0.0\n"," for i in range(self.bsize):\n"," ind_perm = ind[torch.randperm(len(ind))]\n"," z1 = self.fc3(F.relu(self.fc2(F.relu(self.fc1(x[ind,:])))))\n"," z2 = self.fc3(F.relu(self.fc2(F.relu(self.fc1(torch.cat((x[ind,0:nChannel],x[ind_perm,nChannel:2*nChannel]),1))))))\n"," MI_lb += torch.mean(z1) - torch.log(torch.mean(torch.exp(z2)))\n","\n"," return MI_lb/self.bsize\n","\n","\n"," def PerspectiveTransform(I, H, xv, yv):\n"," # apply homography\n"," xvt = (xv*H[0,0]+yv*H[0,1]+H[0,2])/(xv*H[2,0]+yv*H[2,1]+H[2,2])\n"," yvt = (xv*H[1,0]+yv*H[1,1]+H[1,2])/(xv*H[2,0]+yv*H[2,1]+H[2,2])\n"," J = F.grid_sample(I,torch.stack([xvt,yvt],2).unsqueeze(0)).squeeze()\n"," return J\n","\n","\n"," def multi_resolution_loss():\n"," loss=0.0\n"," for s in np.arange(L-1,-1,-1):\n"," if nChannel>1:\n"," Jw_ = PerspectiveTransform(J_lst[s].unsqueeze(0), homography_net(s), xy_lst[s][:,:,0], xy_lst[s][:,:,1]).squeeze()\n"," mi = mine_net(torch.cat([I_lst[s],Jw_],0).permute(1,2,0),ind_lst[s])\n"," loss = loss - (1./L)*mi\n"," else:\n"," Jw_ = PerspectiveTransform(J_lst[s].unsqueeze(0).unsqueeze(0), homography_net(s), xy_lst[s][:,:,0], xy_lst[s][:,:,1]).squeeze()\n"," mi = mine_net(torch.stack([I_lst[s],Jw_],2),ind_lst[s])\n"," loss = loss - (1./L)*mi\n","\n"," return loss\n","\n"," def histogram_mutual_information(image1, image2):\n"," hgram, x_edges, y_edges = np.histogram2d(image1.ravel(), image2.ravel(), bins=100)\n"," pxy = hgram / float(np.sum(hgram))\n"," px = np.sum(pxy, axis=1)\n"," py = np.sum(pxy, axis=0)\n"," px_py = px[:, None] * py[None, :]\n"," nzs = pxy > 0\n"," return np.sum(pxy[nzs] * np.log(pxy[nzs] / px_py[nzs]))\n","\n","\n","print(\"Done\")\n"]},{"cell_type":"markdown","metadata":{"id":"0Dfn8ZsEMv5d"},"source":["## **4.2. Start Trainning**\n","---\n","When playing the cell below you should see updates after each iterations (round). A new network will be trained for each image that need to be registered.\n","\n","* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches. 
Another way circumvent this is to save the parameters of the model after training and start training again from this point.\n","\n"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"fisJmA13Mv5e","scrolled":true},"outputs":[],"source":["#@markdown ##Start training and the registration process\n","\n","start = time.time()\n","\n","loop_number = 1\n","\n","\n","\n","if Registration_mode == \"Affine\":\n","\n"," print(\"Affine registration.....\")\n","\n"," for image in os.listdir(Moving_image_folder):\n","\n"," if path.isfile(Fixed_image_folder):\n"," I = imread(Fixed_image_folder).astype(np.float32) # fixed image\n","\n"," if path.isdir(Fixed_image_folder):\n"," Fixed_image = os.listdir(Fixed_image_folder)\n"," I = imread(Fixed_image_folder+\"/\"+Fixed_image[0]).astype(np.float32) # fixed image\n","\n"," J = imread(Moving_image_folder+\"/\"+image).astype(np.float32)\n","\n"," # Here we generate the pyramidal images\n"," ifplot=True\n"," if np.ndim(I) == 3:\n"," nChannel=I.shape[2]\n"," pyramid_I = tuple(pyramid_gaussian(gaussian(I, sigma=1, multichannel=True), downscale=downscale, multichannel=True))\n"," pyramid_J = tuple(pyramid_gaussian(gaussian(J, sigma=1, multichannel=True), downscale=downscale, multichannel=True))\n"," elif np.ndim(I) == 2:\n"," nChannel=1\n"," pyramid_I = tuple(pyramid_gaussian(gaussian(I, sigma=1, multichannel=False), downscale=downscale, multichannel=False))\n"," pyramid_J = tuple(pyramid_gaussian(gaussian(J, sigma=1, multichannel=False), downscale=downscale, multichannel=False))\n"," else:\n"," print(\"Unknown rank for an image\")\n","\n","\n"," # create a list of necessary objects you will need and commit to GPU\n"," I_lst,J_lst,h_lst,w_lst,xy_lst,ind_lst=[],[],[],[],[],[]\n","\n","\n"," for s in range(L):\n"," I_ = torch.tensor(cv2.normalize(pyramid_I[s].astype(np.float32), None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)).to(device)\n"," J_ = torch.tensor(cv2.normalize(pyramid_J[s].astype(np.float32), None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)).to(device)\n","\n"," if nChannel>1:\n"," I_lst.append(I_.permute(2,0,1))\n"," J_lst.append(J_.permute(2,0,1))\n"," h_, w_ = I_lst[s].shape[1], I_lst[s].shape[2]\n","\n"," edges_grayscale = cv2.dilate(cv2.Canny(cv2.GaussianBlur(rgb2gray(pyramid_I[s]),(21,21),0).astype(np.uint8), 0, 30),\n"," np.ones((5,5),np.uint8),\n"," iterations = 1)\n"," ind_ = torch.nonzero(torch.tensor(edges_grayscale).view(h_*w_)).squeeze().to(device)[:1000000]\n"," ind_lst.append(ind_)\n"," else:\n"," I_lst.append(I_)\n"," J_lst.append(J_)\n"," h_, w_ = I_lst[s].shape[0], I_lst[s].shape[1]\n","\n"," edges_grayscale = cv2.dilate(cv2.Canny(cv2.GaussianBlur(rgb2gray(pyramid_I[s]),(21,21),0).astype(np.uint8), 0, 30),\n"," np.ones((5,5),np.uint8),\n"," iterations = 1)\n"," ind_ = torch.nonzero(torch.tensor(edges_grayscale).view(h_*w_)).squeeze().to(device)[:1000000]\n"," ind_lst.append(ind_)\n","\n"," h_lst.append(h_)\n"," w_lst.append(w_)\n","\n"," y_, x_ = torch.meshgrid([torch.arange(0,h_).float().to(device), torch.arange(0,w_).float().to(device)])\n"," y_, x_ = 2.0*y_/(h_-1) - 1.0, 2.0*x_/(w_-1) - 1.0\n"," xy_ = torch.stack([x_,y_],2)\n"," xy_lst.append(xy_)\n","\n"," homography_net = HomographyNet().to(device)\n"," mine_net = MINE().to(device)\n","\n"," optimizer = optim.Adam([{'params': mine_net.parameters(), 'lr': 1e-3},\n"," {'params': homography_net.vL, 'lr': 5e-3},\n"," {'params': homography_net.v1, 'lr': 1e-4}], amsgrad=True)\n"," mi_list = []\n"," for itr in 
range(number_of_iteration):\n"," optimizer.zero_grad()\n"," loss = multi_resolution_loss()\n"," mi_list.append(-loss.item())\n"," loss.backward()\n"," optimizer.step()\n"," clear_output(wait=True)\n"," plt.plot(mi_list)\n"," plt.xlabel('Iteration number')\n"," plt.ylabel('MI')\n"," plt.title(image+\". Image registration \"+str(loop_number)+\" out of \"+str(len(os.listdir(Moving_image_folder)))+\".\")\n"," plt.show()\n","\n"," I_t = torch.tensor(I).to(device) # without Gaussian\n"," J_t = torch.tensor(J).to(device) # without Gaussian\n"," H = homography_net(0)\n"," if nChannel>1:\n"," J_w = AffineTransform(J_t.permute(2,0,1).unsqueeze(0), H, xy_lst[0][:,:,0], xy_lst[0][:,:,1]).squeeze().permute(1,2,0)\n"," else:\n"," J_w = AffineTransform(J_t.unsqueeze(0).unsqueeze(0), H , xy_lst[0][:,:,0], xy_lst[0][:,:,1]).squeeze()\n","\n"," #Apply registration to other channels\n","\n"," if Apply_registration_to_other_channels:\n","\n"," for n_channel in range(1, int(Number_of_other_channels)+1):\n","\n"," channel = imread(Additional_channels_folder+\"/C\"+str(n_channel)+\"_\"+image).astype(np.float32)\n"," channel_t = torch.tensor(channel).to(device)\n"," channel_w = AffineTransform(channel_t.unsqueeze(0).unsqueeze(0), H , xy_lst[0][:,:,0], xy_lst[0][:,:,1]).squeeze()\n"," channel_registered = channel_w.cpu().data.numpy()\n"," io.imsave(Result_folder+'/'+model_name+\"/\"+\"C\"+str(n_channel)+\"_\"+image+\"_\"+Registration_mode+\"_registered.tif\", channel_registered)\n"," \n","# Export results to numpy array\n"," registered = J_w.cpu().data.numpy()\n","# Save results\n"," io.imsave(Result_folder+'/'+model_name+\"/\"+image+\"_\"+Registration_mode+\"_registered.tif\", registered)\n","\n"," loop_number = loop_number + 1\n","\n"," print(\"Your images have been registered and saved in your result_folder\")\n","\n","\n","#Perspective registration\n","\n","if Registration_mode == \"Perspective\":\n","\n"," print(\"Perspective registration.....\")\n","\n"," for image in os.listdir(Moving_image_folder):\n","\n"," if path.isfile(Fixed_image_folder):\n"," I = imread(Fixed_image_folder).astype(np.float32) # fixed image\n","\n"," if path.isdir(Fixed_image_folder):\n"," Fixed_image = os.listdir(Fixed_image_folder)\n"," I = imread(Fixed_image).astype(np.float32) # fixed image\n","\n"," J = imread(Moving_image_folder+\"/\"+image).astype(np.float32)\n","\n"," # Here we generate the pyramidal images\n"," ifplot=True\n"," if np.ndim(I) == 3:\n"," nChannel=I.shape[2]\n"," pyramid_I = tuple(pyramid_gaussian(gaussian(I, sigma=1, multichannel=True), downscale=downscale, multichannel=True))\n"," pyramid_J = tuple(pyramid_gaussian(gaussian(J, sigma=1, multichannel=True), downscale=downscale, multichannel=True))\n"," elif np.ndim(I) == 2:\n"," nChannel=1\n"," pyramid_I = tuple(pyramid_gaussian(gaussian(I, sigma=1, multichannel=False), downscale=downscale, multichannel=False))\n"," pyramid_J = tuple(pyramid_gaussian(gaussian(J, sigma=1, multichannel=False), downscale=downscale, multichannel=False))\n"," else:\n"," print(\"Unknown rank for an image\")\n","\n","\n"," # create a list of necessary objects you will need and commit to GPU\n"," I_lst,J_lst,h_lst,w_lst,xy_lst,ind_lst=[],[],[],[],[],[]\n"," for s in range(L):\n"," I_ = torch.tensor(cv2.normalize(pyramid_I[s].astype(np.float32), None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)).to(device)\n"," J_ = torch.tensor(cv2.normalize(pyramid_J[s].astype(np.float32), None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)).to(device)\n","\n"," if 
nChannel>1:\n"," I_lst.append(I_.permute(2,0,1))\n"," J_lst.append(J_.permute(2,0,1))\n"," h_, w_ = I_lst[s].shape[1], I_lst[s].shape[2]\n","\n"," ind_ = torch.randperm(int(h_*w_*sampling))\n"," ind_lst.append(ind_)\n"," else:\n"," I_lst.append(I_)\n"," J_lst.append(J_)\n"," h_, w_ = I_lst[s].shape[0], I_lst[s].shape[1]\n","\n"," edges_grayscale = cv2.dilate(cv2.Canny(cv2.GaussianBlur(rgb2gray(pyramid_I[s]),(21,21),0).astype(np.uint8), 0, 10),\n"," np.ones((5,5),np.uint8),\n"," iterations = 1)\n"," ind_ = torch.randperm(int(h_*w_*sampling))\n"," ind_lst.append(ind_)\n"," h_lst.append(h_)\n"," w_lst.append(w_)\n","\n"," y_, x_ = torch.meshgrid([torch.arange(0,h_).float().to(device), torch.arange(0,w_).float().to(device)])\n"," y_, x_ = 2.0*y_/(h_-1) - 1.0, 2.0*x_/(w_-1) - 1.0\n"," xy_ = torch.stack([x_,y_],2)\n"," xy_lst.append(xy_)\n","\n"," homography_net = HomographyNet().to(device)\n"," mine_net = MINE().to(device)\n","\n"," optimizer = optim.Adam([{'params': mine_net.parameters(), 'lr': 1e-3},\n"," {'params': homography_net.vL, 'lr': 1e-3},\n"," {'params': homography_net.v1, 'lr': 1e-4}], amsgrad=True)\n"," mi_list = []\n"," for itr in range(number_of_iteration):\n"," optimizer.zero_grad()\n"," loss = multi_resolution_loss()\n"," mi_list.append(-loss.item())\n"," loss.backward()\n"," optimizer.step()\n"," clear_output(wait=True)\n"," plt.plot(mi_list)\n"," plt.xlabel('Iteration number')\n"," plt.ylabel('MI')\n"," plt.title(image+\". Image registration \"+str(loop_number)+\" out of \"+str(len(os.listdir(Moving_image_folder)))+\".\")\n"," plt.show()\n","\n"," I_t = torch.tensor(I).to(device) # without Gaussian\n"," J_t = torch.tensor(J).to(device) # without Gaussian\n"," H = homography_net(0)\n"," if nChannel>1:\n"," J_w = PerspectiveTransform(J_t.permute(2,0,1).unsqueeze(0), H, xy_lst[0][:,:,0], xy_lst[0][:,:,1]).squeeze().permute(1,2,0)\n"," else:\n"," J_w = PerspectiveTransform(J_t.unsqueeze(0).unsqueeze(0), H , xy_lst[0][:,:,0], xy_lst[0][:,:,1]).squeeze()\n","\n"," #Apply registration to other channels\n","\n"," if Apply_registration_to_other_channels:\n","\n"," for n_channel in range(1, int(Number_of_other_channels)+1):\n","\n"," channel = imread(Additional_channels_folder+\"/C\"+str(n_channel)+\"_\"+image).astype(np.float32)\n"," channel_t = torch.tensor(channel).to(device)\n"," channel_w = PerspectiveTransform(channel_t.unsqueeze(0).unsqueeze(0), H , xy_lst[0][:,:,0], xy_lst[0][:,:,1]).squeeze()\n"," channel_registered = channel_w.cpu().data.numpy()\n"," io.imsave(Result_folder+'/'+model_name+\"/\"+\"C\"+str(n_channel)+\"_\"+image+\"_Perspective_registered.tif\", channel_registered) \n","\n","\n","# Export results to numpy array\n"," registered = J_w.cpu().data.numpy()\n","# Save results\n"," io.imsave(Result_folder+'/'+model_name+\"/\"+image+\"_Perspective_registered.tif\", registered)\n","\n"," loop_number = loop_number + 1\n","\n"," print(\"Your images have been registered and saved in your result_folder\")\n","\n","\n","# PDF export missing \n","\n","#pdf_export(trained = True, augmentation = Use_Data_augmentation, pretrained_model = Use_pretrained_model)\n","\n"]},{"cell_type":"markdown","metadata":{"id":"PfTw_pQUUAqB"},"source":["## **4.3. 
Assess the registration**\n","---\n","\n","\n"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"SrArBvqwYvc9"},"outputs":[],"source":["# @markdown ##Run this cell to display a randomly chosen input and its corresponding predicted output.\n","\n","# For sliders and dropdown menu and progress bar\n","from ipywidgets import interact\n","import ipywidgets as widgets\n","\n","print('--------------------------------------------------------------')\n","@interact\n","def show_QC_results(file = os.listdir(Moving_image_folder)):\n","\n"," moving_image = imread(Moving_image_folder+\"/\"+file).astype(np.float32)\n"," \n"," registered_image = imread(Result_folder+\"/\"+model_name+\"/\"+file+\"_\"+Registration_mode+\"_registered.tif\").astype(np.float32)\n","\n","#Here we display one image\n","\n"," f=plt.figure(figsize=(20,20))\n"," plt.subplot(1,5,1)\n"," plt.imshow(I, norm=simple_norm(I, percent = 99), interpolation='nearest')\n"," plt.title('Fixed image')\n"," plt.axis('off');\n","\n"," plt.subplot(1,5,2)\n"," plt.imshow(moving_image, norm=simple_norm(moving_image, percent = 99), interpolation='nearest')\n"," plt.title('Moving image')\n"," plt.axis('off');\n","\n"," plt.subplot(1,5,3)\n"," plt.imshow(registered_image, norm=simple_norm(registered_image, percent = 99), interpolation='nearest')\n"," plt.title(\"Registered image\")\n"," plt.axis('off');\n","\n"," plt.subplot(1,5,4)\n"," plt.imshow(I, norm=simple_norm(I, percent = 99), interpolation='nearest', cmap=\"Greens\")\n"," plt.imshow(moving_image, norm=simple_norm(moving_image, percent = 99), interpolation='nearest', cmap=\"Oranges\", alpha=0.5)\n"," plt.title(\"Fixed and moving images\")\n"," plt.axis('off');\n","\n"," plt.subplot(1,5,5)\n"," plt.imshow(I, norm=simple_norm(I, percent = 99), interpolation='nearest', cmap=\"Greens\")\n"," plt.imshow(registered_image, norm=simple_norm(registered_image, percent = 99), interpolation='nearest', cmap=\"Oranges\", alpha=0.5)\n"," plt.title(\"Fixed and Registered images\")\n"," plt.axis('off');\n","\n"," plt.show()"]},{"cell_type":"markdown","metadata":{"id":"wgO7Ok1PBFQj"},"source":["## **4.4. Download your predictions**\n","---\n","\n","**Store your data** and ALL its results elsewhere by downloading it from Google Drive and after that clean the original folder tree (datasets, results, etc.) if you plan to train or use new networks. Please note that the notebook will otherwise **OVERWRITE** all files which have the same name."]},{"cell_type":"markdown","metadata":{"id":"XXsUh88HqYay"},"source":["# **5. 
Version log**\n","---\n","**v1.13**: \n","\n","* This version now includes built-in version check and the version log that you're reading now."]},{"cell_type":"markdown","metadata":{"id":"nlyPYwZu4VVS"},"source":["#**Thank you for using DRMIME 2D!**"]}],"metadata":{"accelerator":"GPU","colab":{"collapsed_sections":[],"name":"DRMIME_2D_ZeroCostDL4Mic.ipynb","provenance":[{"file_id":"1hzAI0joLETcG5sI2Qvo8AKDr0TWRKySJ","timestamp":1587653755731},{"file_id":"1QFcz4NnQv4rMwDNl7AzHajN-Ola9sUFW","timestamp":1586411847878},{"file_id":"12UDRQ7abcnXcf5FctR9IUStgCpBiQWn7","timestamp":1584466922281},{"file_id":"1zXCn3A39GI1MCnXK_g_Z-AWh9vkB0YhU","timestamp":1583244415636}],"toc_visible":true},"kernelspec":{"display_name":"Python 3","language":"python","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.6.9"}},"nbformat":4,"nbformat_minor":0} +{"cells":[{"cell_type":"markdown","metadata":{"id":"IkSguVy8Xv83"},"source":["# **DRMIME (2D)**\n","\n","---\n","\n"," DRMIME is a self-supervised deep-learning method that can be used to register 2D images.\n","\n"," **This particular notebook enables self-supervised registration of 2D dataset.**\n","\n","---\n","\n","*Disclaimer*:\n","\n","This notebook is part of the Zero-Cost Deep-Learning to Enhance Microscopy project (https://github.com/HenriquesLab/DeepLearning_Collab/wiki). Jointly developed by the Jacquemet (link to https://cellmig.org/) and Henriques (https://henriqueslab.github.io/) laboratories. \n","\n","\n","While this notebook is part of the Zero-Cost Deep-Learning to Enhance Microscopy project (ZeroCostDL4Mic), this notebook structure substantially deviates from other ZeroCostDL4Mic notebooks and our template. This is because the deep learning method employed here is used to improve the image registration process. No Deep Learning models are actually saved, only the registered images. \n","\n","\n","This notebook is largely based on the following paper:\n","\n","DRMIME: Differentiable Mutual Information and Matrix Exponential for Multi-Resolution Image Registration by Abhishek Nan\n"," *et al.* published on arXiv in 2020 (https://arxiv.org/abs/2001.09865)\n","\n","And source code found in: https://github.com/abnan/DRMIME\n","\n","**Please also cite this original paper when using or developing this notebook.**\n"]},{"cell_type":"markdown","metadata":{"id":"jWAz2i7RdxUV"},"source":["# **How to use this notebook?**\n","\n","---\n","\n","Video describing how to use our notebooks are available on youtube:\n"," - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run through of the workflow to obtain the notebooks and the provided test datasets as well as a common use of the notebook\n"," - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook\n","\n","\n","---\n","###**Structure of a notebook**\n","\n","The notebook contains two types of cell: \n","\n","**Text cells** provide information and can be modified by douple-clicking the cell. You are currently reading the text cell. You can create a new text by clicking `+ Text`.\n","\n","**Code cells** contain code and the code can be modfied by selecting the cell. To execute the cell, move your cursor on the `[ ]`-mark on the left side of the cell (play button appears). Click to execute the cell. 
After execution is done the animation of play button stops. You can create a new coding cell by clicking `+ Code`.\n","\n","---\n","###**Table of contents, Code snippets** and **Files**\n","\n","On the top left side of the notebook you find three tabs which contain from top to bottom:\n","\n","*Table of contents* = contains structure of the notebook. Click the content to move quickly between sections.\n","\n","*Code snippets* = contain examples how to code certain tasks. You can ignore this when using this notebook.\n","\n","*Files* = contain all available files. After mounting your google drive (see section 1.) you will find your files and folders here. \n","\n","**Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive-button; your Google Drive is connected in section 1.2.\n","\n","**Note:** The \"sample data\" in \"Files\" contains default files. Do not upload anything in here!\n","\n","---\n","###**Making changes to the notebook**\n","\n","**You can make a copy** of the notebook and save it to your Google Drive. To do this click file -> save a copy in drive.\n","\n","To **edit a cell**, double click on the text. This will show you either the source code (in code cells) or the source text (in text cells).\n","You can use the `#`-mark in code cells to comment out parts of the code. This allows you to keep the original code piece in the cell as a comment."]},{"cell_type":"markdown","metadata":{"id":"gKDLkLWUd-YX"},"source":["# **0. Before getting started**\n","---\n","\n","Before you run the notebook, please ensure that you are logged into your Google account and have the training and/or data to process in your Google Drive.\n","\n","For DRMIME to train, it requires at least two images. One **`\"Fixed image\"`** (template for the registration) and one **`Moving Image`** (image to be registered). Multiple **`Moving Images`** can also be provided if you want to register them to the same **`\"Fixed image\"`**. If you provide several **`Moving Images`**, multiple DRMIME instances will run one after another. \n","\n","The registration can also be applied to other channels. If you wish to apply the registration to other channels, please provide the images in another folder and carefully check your file names. Additional channels need to have the same name as the registered images and a prefix indicating the channel number starting at \"C1_\". See the example below. \n","\n","Here is a common data structure that can work:\n","\n","* Data\n"," \n"," - **Fixed_image_folder**\n"," - img_1.tif (image used as template for the registration)\n"," - **Moving_image_folder**\n"," - img_3.tif, img_4.tif, ... (images to be registered) \n"," - **Folder_containing_additional_channels** (optional, if you want to apply the registration to other channel(s))\n"," - C1_img_3.tif, C1_img_4.tif, ...\n"," - C2_img_3.tif, C2_img_4.tif, ...\n"," - C3_img_3.tif, C3_img_4.tif, ...\n"," - **Results**\n","\n","The **Results** folder will contain the processed images and PDF reports. Your original images remain unmodified.\n","\n","---\n","\n"]},{"cell_type":"markdown","metadata":{"id":"n4yWFoJNnoin"},"source":["# **1. 
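[Editorial note] Before running section 1 it can be worth confirming that the additional-channel files really follow the `C1_`/`C2_` prefix rule described in section 0 above. The sketch below is an editorial illustration and is not part of the notebook; the folder paths and the channel count are placeholders to be replaced with your own values.

```python
# Editorial sketch: verify the folder layout described in section 0.
# Paths and channel count below are assumptions, not notebook variables.
import os

Moving_image_folder = "/path/to/Moving_image_folder"          # placeholder
Additional_channels_folder = "/path/to/Additional_channels"   # placeholder
number_of_additional_channels = 2                             # placeholder

for fname in os.listdir(Moving_image_folder):
    for c in range(1, number_of_additional_channels + 1):
        expected = "C" + str(c) + "_" + fname
        if not os.path.isfile(os.path.join(Additional_channels_folder, expected)):
            print("Missing additional channel file:", expected)
```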
Install DRMIME and dependencies**\n","---"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"fq21zJVFNASx"},"outputs":[],"source":["Notebook_version = '1.13'\n","Network = 'DRMIME'\n","\n","\n","from builtins import any as b_any\n","\n","def get_requirements_path():\n"," # Store requirements file in 'base_path' directory\n"," current_dir = os.getcwd()\n"," dir_count = current_dir.count('/') - 1\n"," path = '../' * (dir_count) + 'requirements.txt'\n"," return path\n","\n","def filter_files(file_list, filter_list):\n"," filtered_list = []\n"," for fname in file_list:\n"," if b_any(fname.split('==')[0] in s for s in filter_list):\n"," filtered_list.append(fname)\n"," return filtered_list\n","\n","def build_requirements_file(before, after):\n"," path = get_requirements_path()\n","\n"," # Exporting requirements.txt for local run\n"," !pip freeze > $path\n","\n"," # Get minimum requirements file\n"," df = pd.read_csv(path)\n"," mod_list = [m.split('.')[0] for m in after if not m in before]\n"," req_list_temp = df.values.tolist()\n"," req_list = [x[0] for x in req_list_temp]\n","\n"," # Replace with package name and handle cases where import name is different to module name\n"," mod_name_list = [['sklearn', 'scikit-learn'], ['skimage', 'scikit-image']]\n"," mod_replace_list = [[x[1] for x in mod_name_list] if s in [x[0] for x in mod_name_list] else s for s in mod_list]\n"," filtered_list = filter_files(req_list, mod_replace_list)\n","\n"," file=open(path,'w')\n"," for item in filtered_list:\n"," file.writelines(item)\n","\n"," file.close()\n","\n","import sys\n","before = [str(m) for m in sys.modules]\n","\n","#@markdown ##Install DRMIME and dependencies\n","\n","# Here we install DRMIME and other required packages\n","\n","!pip install wget\n","\n","from skimage import io\n","import numpy as np\n","import math\n","import matplotlib.pyplot as plt\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","from torch.autograd import Variable\n","import torch.optim as optim\n","from skimage.transform import pyramid_gaussian\n","from skimage.filters import gaussian\n","from skimage.filters import threshold_otsu\n","from skimage.filters import sobel\n","from skimage.color import rgb2gray\n","from skimage import feature\n","from torch.autograd import Function\n","import cv2\n","from IPython.display import clear_output\n","import pandas as pd\n","from skimage.io import imsave\n","\n","device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n","\n","\n","# ------- Common variable to all ZeroCostDL4Mic notebooks -------\n","\n","import urllib\n","import os, random\n","import shutil \n","import zipfile\n","from tifffile import imread, imsave\n","import time\n","import sys\n","import wget\n","from pathlib import Path\n","import pandas as pd\n","import csv\n","from glob import glob\n","from scipy import signal\n","from scipy import ndimage\n","from skimage import io\n","from sklearn.linear_model import LinearRegression\n","from skimage.util import img_as_uint\n","import matplotlib as mpl\n","from skimage.metrics import structural_similarity\n","from skimage.metrics import peak_signal_noise_ratio as psnr\n","from astropy.visualization import simple_norm\n","from skimage import img_as_float32\n","\n","#Create a variable to get and store relative base path\n","base_path = os.getcwd()\n","\n","# Colors for the warning messages\n","class bcolors:\n"," WARNING = '\\033[31m'\n","W = '\\033[0m' # white (normal)\n","R = '\\033[31m' # 
red\n","\n","#Disable some of the tensorflow warnings\n","import warnings\n","warnings.filterwarnings(\"ignore\")\n","\n","\n","# Check if this is the latest version of the notebook\n","All_notebook_versions = pd.read_csv(\"https://raw.githubusercontent.com/HenriquesLab/ZeroCostDL4Mic/master/Colab_notebooks/Latest_Notebook_versions.csv\", dtype=str)\n","print('Notebook version: '+Notebook_version)\n","Latest_Notebook_version = All_notebook_versions[All_notebook_versions[\"Notebook\"] == Network]['Version'].iloc[0]\n","print('Latest notebook version: '+Latest_Notebook_version)\n","if Notebook_version == Latest_Notebook_version:\n"," print(\"This notebook is up-to-date.\")\n","else:\n"," print(bcolors.WARNING +\"A new version of this notebook has been released. We recommend that you download it at https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki\")\n","\n","\n","\n","!pip freeze > requirements.txt\n","\n","\n","print(\"Libraries installed\")\n","\n","# Build requirements file for local run\n","after = [str(m) for m in sys.modules]\n","build_requirements_file(before, after)"]},{"cell_type":"markdown","metadata":{"id":"cbTknRcviyT7"},"source":["# **2. Initialise the Colab session**\n","\n","\n","\n","\n","---\n","\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"DMNHVZfHmbKb"},"source":["## **2.1. Check for GPU access**\n","---\n","\n","By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:\n","\n","Go to **Runtime -> Change the Runtime type**\n","\n","**Runtime type: Python 3** *(Python 3 is programming language in which this program is written)*\n","\n","**Accelerator: GPU** *(Graphics processing unit)*\n"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"h5i5CS2bSmZr"},"outputs":[],"source":["#@markdown ##Run this cell to check if you have GPU access\n","#%tensorflow_version 1.x\n","\n","\n","import tensorflow as tf\n","if tf.test.gpu_device_name()=='':\n"," print('You do not have GPU access.') \n"," print('Did you change your runtime ?') \n"," print('If the runtime setting is correct then Google did not allocate a GPU for your session')\n"," print('Expect slow performance. To access GPU try reconnecting later')\n","\n","else:\n"," print('You have GPU access')\n"," !nvidia-smi"]},{"cell_type":"markdown","metadata":{"id":"n3B3meGTbYVi"},"source":["## **2.2. Mount your Google Drive**\n","---\n"," To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook.\n","\n"," Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste into the cell and press enter. This will give Colab access to the data on the drive. \n","\n"," Once this is done, your data are available in the **Files** tab on the top left of notebook."]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"01Djr8v-5pPk"},"outputs":[],"source":["#@markdown ##Play the cell to connect your Google Drive to Colab\n","\n","#@markdown * Click on the URL. \n","\n","#@markdown * Sign in your Google Account. \n","\n","#@markdown * Copy the authorization code. \n","\n","#@markdown * Enter the authorization code. \n","\n","#@markdown * Click on \"Files\" site on the right. Refresh the site. Your Google Drive folder should now be available here as \"drive\". 
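[Editorial note] The `base_path = os.getcwd()` variable defined in the install cell above is the core of this patch: locations that were previously hard-coded as `/content` are now built relative to the working directory, which appears intended to let the same notebook run outside Colab as well. A minimal illustration of the pattern (the file name is just an example taken from later in this notebook):

```python
import os

base_path = os.getcwd()  # '/content' on Colab, the notebook's own directory elsewhere

# old, Colab-only style:
# plot_path = '/content/TrainingDataExample_DRMIME2D.png'

# new, relative style used throughout this commit:
plot_path = base_path + '/TrainingDataExample_DRMIME2D.png'
print(plot_path)
```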
\n","\n","# mount user's Google Drive to Google Colab.\n","from google.colab import drive\n","drive.mount(base_path + '/gdrive')"]},{"cell_type":"markdown","metadata":{"id":"HLYcZR9gMv42"},"source":["# **3. Select your parameters and paths**\n","---"]},{"cell_type":"markdown","metadata":{"id":"Kbn9_JdqnNnK"},"source":["## **3.1. Setting main training parameters**\n","---\n"," "]},{"cell_type":"markdown","metadata":{"id":"CB6acvUFtWqd"},"source":[" **Paths for training, predictions and results**\n","These is the path to your folders containing the image you want to register. To find the path of the folder containing your datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below.\n","\n","**`Fixed_image_folder`:** This is the folder containing your \"Fixed image\".\n","\n","**`Moving_image_folder`:** This is the folder containing your \"Moving Image(s)\".\n","\n","**`Result_folder`:** This is the folder where your results will be saved.\n","\n","\n","**Training Parameters**\n","\n","**`model_name`:** Choose a name for your model.\n","\n","**`number_of_iteration`:** Input how many iteration (rounds) the network will be trained. Preliminary results can already be observed after a 200 iterations, but a full training should run for 500-1000 iterations. **Default value: 500**\n","\n","**`Registration_mode`:** Choose which registration method you would like to use.\n","\n","**Additional channels**\n","\n"," This option enable you to apply the registration to other images (for instance other channels). Place these images in the **`Additional_channels_folder`**. Additional channels need to have the same name as the images you want to register (found in **`Moving_image_folder`**) and a prefix indicating the channel number starting at \"C1_\".\n","\n"," \n","**Advanced Parameters - experienced users only**\n","\n","**`n_neurons`:** Number of neurons (elementary constituents) that will assemble your model. **Default value: 100**.\n","\n","**`mine_initial_learning_rate`:** Input the initial value to be used as learning rate for MINE. **Default value: 0.001**\n","**`homography_net_vL_initial_learning_rate`:** Input the initial value to be used as learning rate for homography_net_vL. **Default value: 0.001**\n","\n","**`homography_net_v1_initial_learning_rate`:** Input the initial value to be used as learning rate for homography_net_v1. 
**Default value: 0.0001**\n"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"ewpNJ_I0Mv47"},"outputs":[],"source":["\n","#@markdown ###Path to the Fixed and Moving image folders: \n","Fixed_image_folder = \"\" #@param {type:\"string\"}\n","\n","\n","import os.path\n","from os import path\n","\n","if path.isfile(Fixed_image_folder):\n"," I = imread(Fixed_image_folder).astype(np.float32) # fixed image\n","\n","if path.isdir(Fixed_image_folder):\n"," Fixed_image = os.listdir(Fixed_image_folder)\n"," I = imread(Fixed_image_folder+\"/\"+Fixed_image[0]).astype(np.float32) # fixed image\n","\n","\n","Moving_image_folder = \"\" #@param {type:\"string\"}\n","\n","#@markdown ### Provide the path to the folder where the predictions are to be saved\n","Result_folder = \"\" #@param {type:\"string\"}\n","\n","\n","#@markdown ###Training Parameters\n","model_name = \"\" #@param {type:\"string\"}\n","\n","number_of_iteration = 500#@param {type:\"number\"}\n","\n","Registration_mode = \"Affine\" #@param [\"Affine\", \"Perspective\"]\n","\n","\n","#@markdown ###Do you want to apply the registration to other channel(s)?\n","Apply_registration_to_other_channels = False#@param {type:\"boolean\"}\n","\n","Additional_channels_folder = \"\" #@param {type:\"string\"}\n","\n","#@markdown ###Advanced Parameters\n","\n","Use_Default_Advanced_Parameters = True#@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please input:\n","\n","n_neurons = 100 #@param {type:\"number\"}\n","mine_initial_learning_rate = 0.001 #@param {type:\"number\"}\n","homography_net_vL_initial_learning_rate = 0.001 #@param {type:\"number\"}\n","homography_net_v1_initial_learning_rate = 0.0001 #@param {type:\"number\"}\n","\n","if (Use_Default_Advanced_Parameters): \n"," print(\"Default advanced parameters enabled\") \n"," n_neurons = 100\n"," mine_initial_learning_rate = 0.001\n"," homography_net_vL_initial_learning_rate = 0.001\n"," homography_net_v1_initial_learning_rate = 0.0001\n","\n","\n","#failsafe for downscale could be useful \n","#to be added\n","\n","\n","#Load a random moving image to visualise and test the settings\n","random_choice = random.choice(os.listdir(Moving_image_folder))\n","J = imread(Moving_image_folder+\"/\"+random_choice).astype(np.float32)\n","\n","# Check if additional channel(s) need to be registered and if so how many\n","\n","print(str(len(os.listdir(Moving_image_folder)))+\" image(s) will be registered.\")\n","\n","if Apply_registration_to_other_channels:\n","\n"," other_channel_images = os.listdir(Additional_channels_folder)\n"," Number_of_other_channels = len(other_channel_images)/len(os.listdir(Moving_image_folder))\n","\n"," if Number_of_other_channels.is_integer():\n"," print(\"The registration(s) will be propagated to \"+str(Number_of_other_channels)+\" other channel(s)\")\n"," else:\n"," print(bcolors.WARNING +\"!! WARNING: Incorrect number of images in Folder_containing_additional_channels\"+W)\n","\n","#here we check that no model with the same name already exist, if so print a warning\n","if os.path.exists(Result_folder+'/'+model_name):\n"," print(bcolors.WARNING +\"!! 
WARNING: \"+model_name+\" already exists and will be deleted in the following cell !!\")\n"," print(bcolors.WARNING +\"To continue training \"+model_name+\", choose a new model_name here, and load \"+model_name+\" in section 3.3\"+W)\n"," \n","\n","print(\"Example of two images to be registered\")\n","\n","#Here we display one image\n","f=plt.figure(figsize=(10,10))\n","plt.subplot(1,2,1)\n","plt.imshow(I, norm=simple_norm(I, percent = 99), interpolation='nearest')\n","\n","\n","plt.title('Fixed image')\n","plt.axis('off');\n","\n","plt.subplot(1,2,2)\n","plt.imshow(J, norm=simple_norm(J, percent = 99), interpolation='nearest')\n","plt.title('Moving image')\n","plt.axis('off');\n","plt.savefig(base_path + '/TrainingDataExample_DRMIME2D.png',bbox_inches='tight',pad_inches=0)\n","plt.show()\n","\n"]},{"cell_type":"markdown","metadata":{"id":"QpKgUER3y9tn"},"source":["## **3.2. Choose and test the image pre-processing settings**\n","---\n"," DRMIME makes use of multi-resolution image pyramids to perform registration. Unlike a conventional method where computation starts at the highest level of the image pyramid and gradually proceeds to the lower levels, DRMIME simultaneously use all the levels in gradient descent-based optimization using automatic differentiation. Here, you can choose the parameters that define the multi-resolution image pyramids that will be used.\n","\n","**`nb_images_pyramid`:** Choose the number of images to use to assemble the pyramid. **Default value: 10**.\n","\n","**`Level_downscaling`:** Choose the level of downscaling that will be used to create the images of the pyramid **Default value: 1.8**.\n","\n","**`sampling`:** amount of sampling used for the perspective registration. **Default value: 0.1**.\n","\n"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"MoNXLwG6yd76"},"outputs":[],"source":["\n","#@markdown ##Image pre-processing settings\n","\n","nb_images_pyramid = 10#@param {type:\"number\"} # where registration starts (at the coarsest resolution)\n","\n","L = nb_images_pyramid\n","\n","Level_downscaling = 1.8#@param {type:\"number\"}\n","\n","downscale = Level_downscaling\n","\n","sampling = 0.1#@param {type:\"number\"} # 10% sampling used only for perspective registration\n","\n","\n","ifplot=True\n","if np.ndim(I) == 3:\n"," nChannel=I.shape[2]\n"," pyramid_I = tuple(pyramid_gaussian(gaussian(I, sigma=1, multichannel=True), downscale=downscale, multichannel=True))\n"," pyramid_J = tuple(pyramid_gaussian(gaussian(J, sigma=1, multichannel=True), downscale=downscale, multichannel=True))\n","elif np.ndim(I) == 2:\n"," nChannel=1\n"," pyramid_I = tuple(pyramid_gaussian(gaussian(I, sigma=1, multichannel=False), downscale=downscale, multichannel=False))\n"," pyramid_J = tuple(pyramid_gaussian(gaussian(J, sigma=1, multichannel=False), downscale=downscale, multichannel=False))\n","else:\n"," print(\"Unknown rank for an image\")\n","\n","\n","# Control the display\n","width=5\n","height=5\n","rows = int(L/5)+1\n","cols = 5\n","axes=[]\n","fig=plt.figure(figsize=(16,16))\n","\n","if Registration_mode == \"Affine\":\n","\n"," print(\"Affine registration selected\")\n","\n","# create a list of necessary objects you will need and commit to GPU\n"," I_lst,J_lst,h_lst,w_lst,xy_lst,ind_lst=[],[],[],[],[],[]\n"," for s in range(L):\n"," I_ = torch.tensor(cv2.normalize(pyramid_I[s].astype(np.float32), None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)).to(device)\n"," J_ = 
torch.tensor(cv2.normalize(pyramid_J[s].astype(np.float32), None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)).to(device)\n","\n"," if nChannel>1:\n"," I_lst.append(I_.permute(2,0,1))\n"," J_lst.append(J_.permute(2,0,1))\n"," h_, w_ = I_lst[s].shape[1], I_lst[s].shape[2]\n","\n"," edges_grayscale = cv2.dilate(cv2.Canny(cv2.GaussianBlur(rgb2gray(pyramid_I[s]),(21,21),0).astype(np.uint8), 0, 30),\n"," np.ones((5,5),np.uint8),\n"," iterations = 1)\n"," ind_ = torch.nonzero(torch.tensor(edges_grayscale).view(h_*w_)).squeeze().to(device)[:1000000]\n"," ind_lst.append(ind_)\n"," else:\n"," I_lst.append(I_)\n"," J_lst.append(J_)\n"," h_, w_ = I_lst[s].shape[0], I_lst[s].shape[1]\n","\n"," edges_grayscale = cv2.dilate(cv2.Canny(cv2.GaussianBlur(rgb2gray(pyramid_I[s]),(21,21),0).astype(np.uint8), 0, 30),\n"," np.ones((5,5),np.uint8),\n"," iterations = 1)\n"," ind_ = torch.nonzero(torch.tensor(edges_grayscale).view(h_*w_)).squeeze().to(device)[:1000000]\n"," ind_lst.append(ind_) \n"," \n"," axes.append( fig.add_subplot(rows, cols, s+1) )\n"," subplot_title=(str(s))\n"," axes[-1].set_title(subplot_title) \n"," plt.imshow(edges_grayscale)\n"," plt.axis('off');\n","\n"," h_lst.append(h_)\n"," w_lst.append(w_)\n","\n"," y_, x_ = torch.meshgrid([torch.arange(0,h_).float().to(device), torch.arange(0,w_).float().to(device)])\n"," y_, x_ = 2.0*y_/(h_-1) - 1.0, 2.0*x_/(w_-1) - 1.0\n"," xy_ = torch.stack([x_,y_],2)\n"," xy_lst.append(xy_)\n","\n"," fig.tight_layout()\n","\n"," plt.show()\n","\n","\n","if Registration_mode == \"Perspective\":\n","\n"," print(\"Perspective registration selected\")\n","\n","# create a list of necessary objects you will need and commit to GPU\n"," I_lst,J_lst,h_lst,w_lst,xy_lst,ind_lst=[],[],[],[],[],[]\n"," for s in range(L):\n"," I_ = torch.tensor(cv2.normalize(pyramid_I[s].astype(np.float32), None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)).to(device)\n"," J_ = torch.tensor(cv2.normalize(pyramid_J[s].astype(np.float32), None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)).to(device)\n"," \n"," if nChannel>1:\n"," I_lst.append(I_.permute(2,0,1))\n"," J_lst.append(J_.permute(2,0,1))\n"," h_, w_ = I_lst[s].shape[1], I_lst[s].shape[2]\n","\n"," ind_ = torch.randperm(int(h_*w_*sampling))\n"," ind_lst.append(ind_)\n"," else:\n"," I_lst.append(I_)\n"," J_lst.append(J_)\n"," h_, w_ = I_lst[s].shape[0], I_lst[s].shape[1]\n","\n"," edges_grayscale = cv2.dilate(cv2.Canny(cv2.GaussianBlur(rgb2gray(pyramid_I[s]),(21,21),0).astype(np.uint8), 0, 10),\n"," np.ones((5,5),np.uint8),\n"," iterations = 1)\n"," ind_ = torch.randperm(int(h_*w_*sampling))\n"," ind_lst.append(ind_) \n"," \n"," axes.append( fig.add_subplot(rows, cols, s+1) )\n"," subplot_title=(str(s))\n"," axes[-1].set_title(subplot_title) \n"," plt.imshow(edges_grayscale)\n"," plt.axis('off');\n","\n"," h_lst.append(h_)\n"," w_lst.append(w_)\n","\n"," y_, x_ = torch.meshgrid([torch.arange(0,h_).float().to(device), torch.arange(0,w_).float().to(device)])\n"," y_, x_ = 2.0*y_/(h_-1) - 1.0, 2.0*x_/(w_-1) - 1.0\n"," xy_ = torch.stack([x_,y_],2)\n"," xy_lst.append(xy_)\n","\n"," fig.tight_layout()\n","\n"," plt.show()\n"]},{"cell_type":"markdown","metadata":{"id":"keIQhCmOMv5S"},"source":["# **4. Train the network**\n","---"]},{"cell_type":"markdown","metadata":{"id":"Ovu0ESxivcxx"},"source":["## **4.1. Prepare for training**\n","---\n","Here, we use the information from 3. 
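[Editorial note] As a rough illustration of how the two parameters in section 3.2 (`nb_images_pyramid` and `Level_downscaling`) translate into image sizes, the sketch below builds a Gaussian pyramid for a stand-in image and prints the shape of each level. It is not part of the notebook; the 1024x1024 random test image is purely an assumption for illustration.

```python
# Editorial sketch: inspect pyramid level sizes for the default parameters.
import numpy as np
from skimage.transform import pyramid_gaussian

L = 10           # nb_images_pyramid (default)
downscale = 1.8  # Level_downscaling (default)

I_demo = np.random.rand(1024, 1024).astype(np.float32)  # stand-in image (assumption)
pyramid = tuple(pyramid_gaussian(I_demo, downscale=downscale))

for s in range(min(L, len(pyramid))):
    print(s, pyramid[s].shape)  # each level is roughly 1.8x smaller than the previous one
```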
to load the correct dependencies."]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"t4QTv4vQvbnS"},"outputs":[],"source":["#@markdown ##Load the dependencies required for training\n","\n","print(\"--------------------------------------------------\")\n","\n","# Remove the model name folder if exists\n","\n","if os.path.exists(Result_folder+'/'+model_name):\n"," print(bcolors.WARNING +\"!! WARNING: Model folder already exists and has been removed !!\"+W)\n"," shutil.rmtree(Result_folder+'/'+model_name)\n","os.makedirs(Result_folder+'/'+model_name)\n","\n","\n","\n","if Registration_mode == \"Affine\":\n","\n"," class HomographyNet(nn.Module):\n"," def __init__(self):\n"," super(HomographyNet, self).__init__()\n"," # affine transform basis matrices\n","\n"," self.B = torch.zeros(6,3,3).to(device)\n"," self.B[0,0,2] = 1.0\n"," self.B[1,1,2] = 1.0\n"," self.B[2,0,1] = 1.0\n"," self.B[3,1,0] = 1.0\n"," self.B[4,0,0], self.B[4,1,1] = 1.0, -1.0\n"," self.B[5,1,1], self.B[5,2,2] = -1.0, 1.0\n","\n"," self.v1 = torch.nn.Parameter(torch.zeros(6,1,1).to(device), requires_grad=True)\n"," self.vL = torch.nn.Parameter(torch.zeros(6,1,1).to(device), requires_grad=True)\n","\n"," def forward(self, s):\n"," C = torch.sum(self.B*self.vL,0)\n"," if s==0:\n"," C += torch.sum(self.B*self.v1,0)\n"," A = torch.eye(3).to(device)\n"," H = A\n"," for i in torch.arange(1,10):\n"," A = torch.mm(A/i,C)\n"," H = H + A\n"," return H\n","\n"," class MINE(nn.Module): #https://arxiv.org/abs/1801.04062\n"," def __init__(self):\n"," super(MINE, self).__init__()\n"," self.fc1 = nn.Linear(2*nChannel, n_neurons)\n"," self.fc2 = nn.Linear(n_neurons, n_neurons)\n"," self.fc3 = nn.Linear(n_neurons, 1)\n"," self.bsize = 1 # 1 may be sufficient\n","\n"," def forward(self, x, ind):\n"," x = x.view(x.size()[0]*x.size()[1],x.size()[2])\n"," MI_lb=0.0\n"," for i in range(self.bsize):\n"," ind_perm = ind[torch.randperm(len(ind))]\n"," z1 = self.fc3(F.relu(self.fc2(F.relu(self.fc1(x[ind,:])))))\n"," z2 = self.fc3(F.relu(self.fc2(F.relu(self.fc1(torch.cat((x[ind,0:nChannel],x[ind_perm,nChannel:2*nChannel]),1))))))\n"," MI_lb += torch.mean(z1) - torch.log(torch.mean(torch.exp(z2)))\n","\n"," return MI_lb/self.bsize\n","\n"," def AffineTransform(I, H, xv, yv):\n"," # apply affine transform\n"," xvt = (xv*H[0,0]+yv*H[0,1]+H[0,2])/(xv*H[2,0]+yv*H[2,1]+H[2,2])\n"," yvt = (xv*H[1,0]+yv*H[1,1]+H[1,2])/(xv*H[2,0]+yv*H[2,1]+H[2,2])\n"," J = F.grid_sample(I,torch.stack([xvt,yvt],2).unsqueeze(0)).squeeze()\n"," return J\n","\n","\n"," def multi_resolution_loss():\n"," loss=0.0\n"," for s in np.arange(L-1,-1,-1):\n"," if nChannel>1:\n"," Jw_ = AffineTransform(J_lst[s].unsqueeze(0), homography_net(s), xy_lst[s][:,:,0], xy_lst[s][:,:,1]).squeeze()\n"," mi = mine_net(torch.cat([I_lst[s],Jw_],0).permute(1,2,0),ind_lst[s])\n"," loss = loss - (1./L)*mi\n"," else:\n"," Jw_ = AffineTransform(J_lst[s].unsqueeze(0).unsqueeze(0), homography_net(s), xy_lst[s][:,:,0], xy_lst[s][:,:,1]).squeeze()\n"," mi = mine_net(torch.stack([I_lst[s],Jw_],2),ind_lst[s])\n"," loss = loss - (1./L)*mi\n","\n"," return loss\n","\n","\n","\n","if Registration_mode == \"Perspective\":\n","\n"," class HomographyNet(nn.Module):\n"," def __init__(self):\n"," super(HomographyNet, self).__init__()\n"," # affine transform basis matrices\n","\n"," self.B = torch.zeros(8,3,3).to(device)\n"," self.B[0,0,2] = 1.0\n"," self.B[1,1,2] = 1.0\n"," self.B[2,0,1] = 1.0\n"," self.B[3,1,0] = 1.0\n"," self.B[4,0,0], self.B[4,1,1] = 1.0, -1.0\n"," self.B[5,1,1], 
self.B[5,2,2] = -1.0, 1.0\n"," self.B[6,2,0] = 1.0\n"," self.B[7,2,1] = 1.0\n","\n"," self.v1 = torch.nn.Parameter(torch.zeros(8,1,1).to(device), requires_grad=True)\n"," self.vL = torch.nn.Parameter(torch.zeros(8,1,1).to(device), requires_grad=True)\n","\n"," def forward(self, s):\n"," C = torch.sum(self.B*self.vL,0)\n"," if s==0:\n"," C += torch.sum(self.B*self.v1,0)\n"," A = torch.eye(3).to(device)\n"," H = A\n"," for i in torch.arange(1,10):\n"," A = torch.mm(A/i,C)\n"," H = H + A\n"," return H\n","\n","\n"," class MINE(nn.Module): #https://arxiv.org/abs/1801.04062\n"," def __init__(self):\n"," super(MINE, self).__init__()\n"," self.fc1 = nn.Linear(2*nChannel, n_neurons)\n"," self.fc2 = nn.Linear(n_neurons, n_neurons)\n"," self.fc3 = nn.Linear(n_neurons, 1)\n"," self.bsize = 1 # 1 may be sufficient\n","\n"," def forward(self, x, ind):\n"," x = x.view(x.size()[0]*x.size()[1],x.size()[2])\n"," MI_lb=0.0\n"," for i in range(self.bsize):\n"," ind_perm = ind[torch.randperm(len(ind))]\n"," z1 = self.fc3(F.relu(self.fc2(F.relu(self.fc1(x[ind,:])))))\n"," z2 = self.fc3(F.relu(self.fc2(F.relu(self.fc1(torch.cat((x[ind,0:nChannel],x[ind_perm,nChannel:2*nChannel]),1))))))\n"," MI_lb += torch.mean(z1) - torch.log(torch.mean(torch.exp(z2)))\n","\n"," return MI_lb/self.bsize\n","\n","\n"," def PerspectiveTransform(I, H, xv, yv):\n"," # apply homography\n"," xvt = (xv*H[0,0]+yv*H[0,1]+H[0,2])/(xv*H[2,0]+yv*H[2,1]+H[2,2])\n"," yvt = (xv*H[1,0]+yv*H[1,1]+H[1,2])/(xv*H[2,0]+yv*H[2,1]+H[2,2])\n"," J = F.grid_sample(I,torch.stack([xvt,yvt],2).unsqueeze(0)).squeeze()\n"," return J\n","\n","\n"," def multi_resolution_loss():\n"," loss=0.0\n"," for s in np.arange(L-1,-1,-1):\n"," if nChannel>1:\n"," Jw_ = PerspectiveTransform(J_lst[s].unsqueeze(0), homography_net(s), xy_lst[s][:,:,0], xy_lst[s][:,:,1]).squeeze()\n"," mi = mine_net(torch.cat([I_lst[s],Jw_],0).permute(1,2,0),ind_lst[s])\n"," loss = loss - (1./L)*mi\n"," else:\n"," Jw_ = PerspectiveTransform(J_lst[s].unsqueeze(0).unsqueeze(0), homography_net(s), xy_lst[s][:,:,0], xy_lst[s][:,:,1]).squeeze()\n"," mi = mine_net(torch.stack([I_lst[s],Jw_],2),ind_lst[s])\n"," loss = loss - (1./L)*mi\n","\n"," return loss\n","\n"," def histogram_mutual_information(image1, image2):\n"," hgram, x_edges, y_edges = np.histogram2d(image1.ravel(), image2.ravel(), bins=100)\n"," pxy = hgram / float(np.sum(hgram))\n"," px = np.sum(pxy, axis=1)\n"," py = np.sum(pxy, axis=0)\n"," px_py = px[:, None] * py[None, :]\n"," nzs = pxy > 0\n"," return np.sum(pxy[nzs] * np.log(pxy[nzs] / px_py[nzs]))\n","\n","\n","print(\"Done\")\n"]},{"cell_type":"markdown","metadata":{"id":"0Dfn8ZsEMv5d"},"source":["## **4.2. Start Trainning**\n","---\n","When playing the cell below you should see updates after each iterations (round). A new network will be trained for each image that need to be registered.\n","\n","* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches. 
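[Editorial note] A note on the `forward()` method of `HomographyNet` defined in section 4.1: the loop `A = torch.mm(A/i, C); H = H + A` accumulates a truncated Taylor series of the matrix exponential (terms up to C^9/9!), so `H` approximates `expm(C)`. The snippet below is an editorial check of that truncation against SciPy for a small random generator matrix; the 0.1 scale factor is an arbitrary assumption and the snippet is not part of the notebook.

```python
# Editorial sketch: compare the truncated series used by HomographyNet.forward()
# with scipy.linalg.expm for a small 3x3 generator matrix.
import torch
from scipy.linalg import expm

C = 0.1 * torch.randn(3, 3)   # small generator matrix (assumption)

A = torch.eye(3)
H = torch.eye(3)
for i in torch.arange(1, 10):
    A = torch.mm(A / i, C)    # A now holds C**i / i!
    H = H + A                 # partial sum of the exponential series

H_ref = torch.tensor(expm(C.numpy()), dtype=torch.float32)
print(torch.allclose(H, H_ref, atol=1e-5))  # expected to print True for small C
```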
Another way circumvent this is to save the parameters of the model after training and start training again from this point.\n","\n"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"fisJmA13Mv5e","scrolled":true},"outputs":[],"source":["#@markdown ##Start training and the registration process\n","\n","start = time.time()\n","\n","loop_number = 1\n","\n","\n","\n","if Registration_mode == \"Affine\":\n","\n"," print(\"Affine registration.....\")\n","\n"," for image in os.listdir(Moving_image_folder):\n","\n"," if path.isfile(Fixed_image_folder):\n"," I = imread(Fixed_image_folder).astype(np.float32) # fixed image\n","\n"," if path.isdir(Fixed_image_folder):\n"," Fixed_image = os.listdir(Fixed_image_folder)\n"," I = imread(Fixed_image_folder+\"/\"+Fixed_image[0]).astype(np.float32) # fixed image\n","\n"," J = imread(Moving_image_folder+\"/\"+image).astype(np.float32)\n","\n"," # Here we generate the pyramidal images\n"," ifplot=True\n"," if np.ndim(I) == 3:\n"," nChannel=I.shape[2]\n"," pyramid_I = tuple(pyramid_gaussian(gaussian(I, sigma=1, multichannel=True), downscale=downscale, multichannel=True))\n"," pyramid_J = tuple(pyramid_gaussian(gaussian(J, sigma=1, multichannel=True), downscale=downscale, multichannel=True))\n"," elif np.ndim(I) == 2:\n"," nChannel=1\n"," pyramid_I = tuple(pyramid_gaussian(gaussian(I, sigma=1, multichannel=False), downscale=downscale, multichannel=False))\n"," pyramid_J = tuple(pyramid_gaussian(gaussian(J, sigma=1, multichannel=False), downscale=downscale, multichannel=False))\n"," else:\n"," print(\"Unknown rank for an image\")\n","\n","\n"," # create a list of necessary objects you will need and commit to GPU\n"," I_lst,J_lst,h_lst,w_lst,xy_lst,ind_lst=[],[],[],[],[],[]\n","\n","\n"," for s in range(L):\n"," I_ = torch.tensor(cv2.normalize(pyramid_I[s].astype(np.float32), None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)).to(device)\n"," J_ = torch.tensor(cv2.normalize(pyramid_J[s].astype(np.float32), None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)).to(device)\n","\n"," if nChannel>1:\n"," I_lst.append(I_.permute(2,0,1))\n"," J_lst.append(J_.permute(2,0,1))\n"," h_, w_ = I_lst[s].shape[1], I_lst[s].shape[2]\n","\n"," edges_grayscale = cv2.dilate(cv2.Canny(cv2.GaussianBlur(rgb2gray(pyramid_I[s]),(21,21),0).astype(np.uint8), 0, 30),\n"," np.ones((5,5),np.uint8),\n"," iterations = 1)\n"," ind_ = torch.nonzero(torch.tensor(edges_grayscale).view(h_*w_)).squeeze().to(device)[:1000000]\n"," ind_lst.append(ind_)\n"," else:\n"," I_lst.append(I_)\n"," J_lst.append(J_)\n"," h_, w_ = I_lst[s].shape[0], I_lst[s].shape[1]\n","\n"," edges_grayscale = cv2.dilate(cv2.Canny(cv2.GaussianBlur(rgb2gray(pyramid_I[s]),(21,21),0).astype(np.uint8), 0, 30),\n"," np.ones((5,5),np.uint8),\n"," iterations = 1)\n"," ind_ = torch.nonzero(torch.tensor(edges_grayscale).view(h_*w_)).squeeze().to(device)[:1000000]\n"," ind_lst.append(ind_)\n","\n"," h_lst.append(h_)\n"," w_lst.append(w_)\n","\n"," y_, x_ = torch.meshgrid([torch.arange(0,h_).float().to(device), torch.arange(0,w_).float().to(device)])\n"," y_, x_ = 2.0*y_/(h_-1) - 1.0, 2.0*x_/(w_-1) - 1.0\n"," xy_ = torch.stack([x_,y_],2)\n"," xy_lst.append(xy_)\n","\n"," homography_net = HomographyNet().to(device)\n"," mine_net = MINE().to(device)\n","\n"," optimizer = optim.Adam([{'params': mine_net.parameters(), 'lr': 1e-3},\n"," {'params': homography_net.vL, 'lr': 5e-3},\n"," {'params': homography_net.v1, 'lr': 1e-4}], amsgrad=True)\n"," mi_list = []\n"," for itr in 
range(number_of_iteration):\n"," optimizer.zero_grad()\n"," loss = multi_resolution_loss()\n"," mi_list.append(-loss.item())\n"," loss.backward()\n"," optimizer.step()\n"," clear_output(wait=True)\n"," plt.plot(mi_list)\n"," plt.xlabel('Iteration number')\n"," plt.ylabel('MI')\n"," plt.title(image+\". Image registration \"+str(loop_number)+\" out of \"+str(len(os.listdir(Moving_image_folder)))+\".\")\n"," plt.show()\n","\n"," I_t = torch.tensor(I).to(device) # without Gaussian\n"," J_t = torch.tensor(J).to(device) # without Gaussian\n"," H = homography_net(0)\n"," if nChannel>1:\n"," J_w = AffineTransform(J_t.permute(2,0,1).unsqueeze(0), H, xy_lst[0][:,:,0], xy_lst[0][:,:,1]).squeeze().permute(1,2,0)\n"," else:\n"," J_w = AffineTransform(J_t.unsqueeze(0).unsqueeze(0), H , xy_lst[0][:,:,0], xy_lst[0][:,:,1]).squeeze()\n","\n"," #Apply registration to other channels\n","\n"," if Apply_registration_to_other_channels:\n","\n"," for n_channel in range(1, int(Number_of_other_channels)+1):\n","\n"," channel = imread(Additional_channels_folder+\"/C\"+str(n_channel)+\"_\"+image).astype(np.float32)\n"," channel_t = torch.tensor(channel).to(device)\n"," channel_w = AffineTransform(channel_t.unsqueeze(0).unsqueeze(0), H , xy_lst[0][:,:,0], xy_lst[0][:,:,1]).squeeze()\n"," channel_registered = channel_w.cpu().data.numpy()\n"," io.imsave(Result_folder+'/'+model_name+\"/\"+\"C\"+str(n_channel)+\"_\"+image+\"_\"+Registration_mode+\"_registered.tif\", channel_registered)\n"," \n","# Export results to numpy array\n"," registered = J_w.cpu().data.numpy()\n","# Save results\n"," io.imsave(Result_folder+'/'+model_name+\"/\"+image+\"_\"+Registration_mode+\"_registered.tif\", registered)\n","\n"," loop_number = loop_number + 1\n","\n"," print(\"Your images have been registered and saved in your result_folder\")\n","\n","\n","#Perspective registration\n","\n","if Registration_mode == \"Perspective\":\n","\n"," print(\"Perspective registration.....\")\n","\n"," for image in os.listdir(Moving_image_folder):\n","\n"," if path.isfile(Fixed_image_folder):\n"," I = imread(Fixed_image_folder).astype(np.float32) # fixed image\n","\n"," if path.isdir(Fixed_image_folder):\n"," Fixed_image = os.listdir(Fixed_image_folder)\n"," I = imread(Fixed_image).astype(np.float32) # fixed image\n","\n"," J = imread(Moving_image_folder+\"/\"+image).astype(np.float32)\n","\n"," # Here we generate the pyramidal images\n"," ifplot=True\n"," if np.ndim(I) == 3:\n"," nChannel=I.shape[2]\n"," pyramid_I = tuple(pyramid_gaussian(gaussian(I, sigma=1, multichannel=True), downscale=downscale, multichannel=True))\n"," pyramid_J = tuple(pyramid_gaussian(gaussian(J, sigma=1, multichannel=True), downscale=downscale, multichannel=True))\n"," elif np.ndim(I) == 2:\n"," nChannel=1\n"," pyramid_I = tuple(pyramid_gaussian(gaussian(I, sigma=1, multichannel=False), downscale=downscale, multichannel=False))\n"," pyramid_J = tuple(pyramid_gaussian(gaussian(J, sigma=1, multichannel=False), downscale=downscale, multichannel=False))\n"," else:\n"," print(\"Unknown rank for an image\")\n","\n","\n"," # create a list of necessary objects you will need and commit to GPU\n"," I_lst,J_lst,h_lst,w_lst,xy_lst,ind_lst=[],[],[],[],[],[]\n"," for s in range(L):\n"," I_ = torch.tensor(cv2.normalize(pyramid_I[s].astype(np.float32), None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)).to(device)\n"," J_ = torch.tensor(cv2.normalize(pyramid_J[s].astype(np.float32), None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)).to(device)\n","\n"," if 
nChannel>1:\n"," I_lst.append(I_.permute(2,0,1))\n"," J_lst.append(J_.permute(2,0,1))\n"," h_, w_ = I_lst[s].shape[1], I_lst[s].shape[2]\n","\n"," ind_ = torch.randperm(int(h_*w_*sampling))\n"," ind_lst.append(ind_)\n"," else:\n"," I_lst.append(I_)\n"," J_lst.append(J_)\n"," h_, w_ = I_lst[s].shape[0], I_lst[s].shape[1]\n","\n"," edges_grayscale = cv2.dilate(cv2.Canny(cv2.GaussianBlur(rgb2gray(pyramid_I[s]),(21,21),0).astype(np.uint8), 0, 10),\n"," np.ones((5,5),np.uint8),\n"," iterations = 1)\n"," ind_ = torch.randperm(int(h_*w_*sampling))\n"," ind_lst.append(ind_)\n"," h_lst.append(h_)\n"," w_lst.append(w_)\n","\n"," y_, x_ = torch.meshgrid([torch.arange(0,h_).float().to(device), torch.arange(0,w_).float().to(device)])\n"," y_, x_ = 2.0*y_/(h_-1) - 1.0, 2.0*x_/(w_-1) - 1.0\n"," xy_ = torch.stack([x_,y_],2)\n"," xy_lst.append(xy_)\n","\n"," homography_net = HomographyNet().to(device)\n"," mine_net = MINE().to(device)\n","\n"," optimizer = optim.Adam([{'params': mine_net.parameters(), 'lr': 1e-3},\n"," {'params': homography_net.vL, 'lr': 1e-3},\n"," {'params': homography_net.v1, 'lr': 1e-4}], amsgrad=True)\n"," mi_list = []\n"," for itr in range(number_of_iteration):\n"," optimizer.zero_grad()\n"," loss = multi_resolution_loss()\n"," mi_list.append(-loss.item())\n"," loss.backward()\n"," optimizer.step()\n"," clear_output(wait=True)\n"," plt.plot(mi_list)\n"," plt.xlabel('Iteration number')\n"," plt.ylabel('MI')\n"," plt.title(image+\". Image registration \"+str(loop_number)+\" out of \"+str(len(os.listdir(Moving_image_folder)))+\".\")\n"," plt.show()\n","\n"," I_t = torch.tensor(I).to(device) # without Gaussian\n"," J_t = torch.tensor(J).to(device) # without Gaussian\n"," H = homography_net(0)\n"," if nChannel>1:\n"," J_w = PerspectiveTransform(J_t.permute(2,0,1).unsqueeze(0), H, xy_lst[0][:,:,0], xy_lst[0][:,:,1]).squeeze().permute(1,2,0)\n"," else:\n"," J_w = PerspectiveTransform(J_t.unsqueeze(0).unsqueeze(0), H , xy_lst[0][:,:,0], xy_lst[0][:,:,1]).squeeze()\n","\n"," #Apply registration to other channels\n","\n"," if Apply_registration_to_other_channels:\n","\n"," for n_channel in range(1, int(Number_of_other_channels)+1):\n","\n"," channel = imread(Additional_channels_folder+\"/C\"+str(n_channel)+\"_\"+image).astype(np.float32)\n"," channel_t = torch.tensor(channel).to(device)\n"," channel_w = PerspectiveTransform(channel_t.unsqueeze(0).unsqueeze(0), H , xy_lst[0][:,:,0], xy_lst[0][:,:,1]).squeeze()\n"," channel_registered = channel_w.cpu().data.numpy()\n"," io.imsave(Result_folder+'/'+model_name+\"/\"+\"C\"+str(n_channel)+\"_\"+image+\"_Perspective_registered.tif\", channel_registered) \n","\n","\n","# Export results to numpy array\n"," registered = J_w.cpu().data.numpy()\n","# Save results\n"," io.imsave(Result_folder+'/'+model_name+\"/\"+image+\"_Perspective_registered.tif\", registered)\n","\n"," loop_number = loop_number + 1\n","\n"," print(\"Your images have been registered and saved in your result_folder\")\n","\n","\n","# PDF export missing \n","\n","#pdf_export(trained = True, augmentation = Use_Data_augmentation, pretrained_model = Use_pretrained_model)\n","\n"]},{"cell_type":"markdown","metadata":{"id":"PfTw_pQUUAqB"},"source":["## **4.3. 
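[Editorial note] Section 4.2 above suggests saving the model parameters so that training can be resumed after a Colab session ends, but the notebook itself does not implement this. Below is a hedged sketch of that idea, reusing the objects created in the training cell (`homography_net`, `mine_net`, `optimizer`, `mi_list`, `device`); the checkpoint file name is an assumption.

```python
# Editorial sketch: save and restore the DRMIME training state (not part of the notebook).
import torch

checkpoint_path = Result_folder + '/' + model_name + '/drmime_checkpoint.pth'  # name is an assumption

# after (or periodically during) the optimisation loop:
torch.save({'homography_net': homography_net.state_dict(),
            'mine_net': mine_net.state_dict(),
            'optimizer': optimizer.state_dict(),
            'mi_list': mi_list},
           checkpoint_path)

# in a fresh session, rebuild the objects with section 4.1 and then resume:
state = torch.load(checkpoint_path, map_location=device)
homography_net.load_state_dict(state['homography_net'])
mine_net.load_state_dict(state['mine_net'])
optimizer.load_state_dict(state['optimizer'])
mi_list = state['mi_list']
```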
Assess the registration**\n","---\n","\n","\n"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"SrArBvqwYvc9"},"outputs":[],"source":["# @markdown ##Run this cell to display a randomly chosen input and its corresponding predicted output.\n","\n","# For sliders and dropdown menu and progress bar\n","from ipywidgets import interact\n","import ipywidgets as widgets\n","\n","print('--------------------------------------------------------------')\n","@interact\n","def show_QC_results(file = os.listdir(Moving_image_folder)):\n","\n"," moving_image = imread(Moving_image_folder+\"/\"+file).astype(np.float32)\n"," \n"," registered_image = imread(Result_folder+\"/\"+model_name+\"/\"+file+\"_\"+Registration_mode+\"_registered.tif\").astype(np.float32)\n","\n","#Here we display one image\n","\n"," f=plt.figure(figsize=(20,20))\n"," plt.subplot(1,5,1)\n"," plt.imshow(I, norm=simple_norm(I, percent = 99), interpolation='nearest')\n"," plt.title('Fixed image')\n"," plt.axis('off');\n","\n"," plt.subplot(1,5,2)\n"," plt.imshow(moving_image, norm=simple_norm(moving_image, percent = 99), interpolation='nearest')\n"," plt.title('Moving image')\n"," plt.axis('off');\n","\n"," plt.subplot(1,5,3)\n"," plt.imshow(registered_image, norm=simple_norm(registered_image, percent = 99), interpolation='nearest')\n"," plt.title(\"Registered image\")\n"," plt.axis('off');\n","\n"," plt.subplot(1,5,4)\n"," plt.imshow(I, norm=simple_norm(I, percent = 99), interpolation='nearest', cmap=\"Greens\")\n"," plt.imshow(moving_image, norm=simple_norm(moving_image, percent = 99), interpolation='nearest', cmap=\"Oranges\", alpha=0.5)\n"," plt.title(\"Fixed and moving images\")\n"," plt.axis('off');\n","\n"," plt.subplot(1,5,5)\n"," plt.imshow(I, norm=simple_norm(I, percent = 99), interpolation='nearest', cmap=\"Greens\")\n"," plt.imshow(registered_image, norm=simple_norm(registered_image, percent = 99), interpolation='nearest', cmap=\"Oranges\", alpha=0.5)\n"," plt.title(\"Fixed and Registered images\")\n"," plt.axis('off');\n","\n"," plt.show()"]},{"cell_type":"markdown","metadata":{"id":"wgO7Ok1PBFQj"},"source":["## **4.4. Download your predictions**\n","---\n","\n","**Store your data** and ALL its results elsewhere by downloading it from Google Drive and after that clean the original folder tree (datasets, results, etc.) if you plan to train or use new networks. Please note that the notebook will otherwise **OVERWRITE** all files which have the same name."]},{"cell_type":"markdown","metadata":{"id":"XXsUh88HqYay"},"source":["# **5. 
Version log**\n","---\n","**v1.13**: \n","\n","* This version now includes built-in version check and the version log that you're reading now."]},{"cell_type":"markdown","metadata":{"id":"nlyPYwZu4VVS"},"source":["#**Thank you for using DRMIME 2D!**"]}],"metadata":{"accelerator":"GPU","colab":{"collapsed_sections":[],"name":"DRMIME_2D_ZeroCostDL4Mic.ipynb","provenance":[{"file_id":"1hzAI0joLETcG5sI2Qvo8AKDr0TWRKySJ","timestamp":1587653755731},{"file_id":"1QFcz4NnQv4rMwDNl7AzHajN-Ola9sUFW","timestamp":1586411847878},{"file_id":"12UDRQ7abcnXcf5FctR9IUStgCpBiQWn7","timestamp":1584466922281},{"file_id":"1zXCn3A39GI1MCnXK_g_Z-AWh9vkB0YhU","timestamp":1583244415636}],"toc_visible":true},"kernelspec":{"display_name":"Python 3","language":"python","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.6.9"}},"nbformat":4,"nbformat_minor":0} diff --git a/Colab_notebooks/DecoNoising_2D_ZeroCostDL4Mic.ipynb b/Colab_notebooks/DecoNoising_2D_ZeroCostDL4Mic.ipynb index 88ec51ec..219eb430 100644 --- a/Colab_notebooks/DecoNoising_2D_ZeroCostDL4Mic.ipynb +++ b/Colab_notebooks/DecoNoising_2D_ZeroCostDL4Mic.ipynb @@ -157,13 +157,17 @@ "Notebook_version = '1.13.1'\n", "Network = 'DecoNoising'\n", "\n", + "import os \n", + "\n", + "#Create a variable to get and store relative base path\n", + "base_path = os.getcwd()\n", "\n", "#@markdown ##Install DecoNoising and dependencies\n", "\n", "from builtins import any as b_any\n", "\n", "def get_requirements_path():\n", - " # Store requirements file in 'contents' directory\n", + " # Store requirements file in 'base_path' directory\n", " current_dir = os.getcwd()\n", " dir_count = current_dir.count('/') - 1\n", " path = '../' * (dir_count) + 'requirements.txt'\n", @@ -208,11 +212,11 @@ "\n", "!git clone https://github.com/juglab/PN2V\n", "\n", - "sys.path.append('/content/PN2V')\n", + "sys.path.append(base_path + '/PN2V')\n", "\n", "from pn2v import training\n", "\n", - "sys.path.append('/content/DecoNoising')\n", + "sys.path.append(base_path + '/DecoNoising')\n", "\n", "import matplotlib.pyplot as plt\n", "from unet.model import UNet\n", @@ -447,8 +451,8 @@ " pdf.ln(1)\n", " pdf.cell(60, 5, txt = 'Example Training Image', ln=1)\n", " pdf.ln(1)\n", - " exp_size = io.imread('/content/TrainingDataExample.png').shape\n", - " pdf.image('/content/TrainingDataExample.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))\n", + " exp_size = io.imread(base_path + '/TrainingDataExample.png').shape\n", + " pdf.image(base_path + '/TrainingDataExample.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))\n", " pdf.ln(1)\n", " ref_1 = 'References:\\n - ZeroCostDL4Mic: von Chamier, Lucas & Laine, Romain, et al. 
\"Democratising deep learning for microscopy with ZeroCostDL4Mic.\" Nature Communications (2021).'\n", " pdf.multi_cell(190, 5, txt = ref_1, align='L')\n", @@ -719,7 +723,7 @@ "\n", "# mount user's Google Drive to Google Colab.\n", "from google.colab import drive\n", - "drive.mount('/content/gdrive')" + "drive.mount(base_path + '/gdrive')" ] }, { @@ -880,15 +884,15 @@ " Noisy_for_validation = 1\n", "\n", "#Here we split the training dataset between training and validation\n", - "# Everything is copied in the /Content Folder\n", - "Training_source_temp = \"/content/training_source\"\n", + "# Everything is copied in the 'base_path' Folder\n", + "Training_source_temp = base_path + \"/training_source\"\n", "\n", "if os.path.exists(Training_source_temp):\n", " shutil.rmtree(Training_source_temp)\n", "os.makedirs(Training_source_temp)\n", "\n", "\n", - "Validation_source_temp = \"/content/validation_source\"\n", + "Validation_source_temp = base_path + \"/validation_source\"\n", "\n", "if os.path.exists(Validation_source_temp):\n", " shutil.rmtree(Validation_source_temp)\n", @@ -921,7 +925,7 @@ "plt.imshow(x, interpolation='nearest', norm=norm, cmap='magma')\n", "plt.title('Training source')\n", "plt.axis('off');\n", - "plt.savefig('/content/TrainingDataExample.png',bbox_inches='tight',pad_inches=0)\n" + "plt.savefig(base_path + '/TrainingDataExample.png',bbox_inches='tight',pad_inches=0)\n" ] }, { @@ -1110,7 +1114,7 @@ "\n", " if pretrained_model_choice == \"Model_name\":\n", " pretrained_model_name = \"Model_name\"\n", - " pretrained_model_path = \"/content/\"+pretrained_model_name\n", + " pretrained_model_path = base_path + \"/\"+pretrained_model_name\n", " print(\"Downloading the 2D_Demo_Model_from_Stardist_2D_paper\")\n", " if os.path.exists(pretrained_model_path):\n", " shutil.rmtree(pretrained_model_path)\n", diff --git a/Colab_notebooks/DenoiSeg_ZeroCostDL4Mic.ipynb b/Colab_notebooks/DenoiSeg_ZeroCostDL4Mic.ipynb index 1dbb4d41..8fbff609 100644 --- a/Colab_notebooks/DenoiSeg_ZeroCostDL4Mic.ipynb +++ b/Colab_notebooks/DenoiSeg_ZeroCostDL4Mic.ipynb @@ -232,7 +232,7 @@ "from builtins import any as b_any\n", "\n", "def get_requirements_path():\n", - " # Store requirements file in 'contents' directory\n", + " # Store requirements file in 'base_path' directory\n", " current_dir = os.getcwd()\n", " dir_count = current_dir.count('/') - 1\n", " path = '../' * (dir_count) + 'requirements.txt'\n", @@ -333,6 +333,9 @@ "import subprocess\n", "from pip._internal.operations.freeze import freeze\n", "\n", + "#Create a variable to get and store relative base path\n", + "base_path = os.getcwd()\n", + "\n", "# Colors for the warning messages\n", "class bcolors:\n", " WARNING = '\\033[31m'\n", @@ -509,8 +512,8 @@ " pdf.ln(1)\n", " pdf.cell(60, 5, txt = 'Example Training pair', ln=1)\n", " pdf.ln(1)\n", - " exp_size = io.imread('/content/TrainingDataExample_DenoiSeg.png').shape\n", - " pdf.image('/content/TrainingDataExample_DenoiSeg.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))\n", + " exp_size = io.imread(base_path + '/TrainingDataExample_DenoiSeg.png').shape\n", + " pdf.image(base_path + '/TrainingDataExample_DenoiSeg.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))\n", " pdf.ln(1)\n", " ref_1 = 'References:\\n - ZeroCostDL4Mic: von Chamier, Lucas & Laine, Romain, et al. 
\"Democratising deep learning for microscopy with ZeroCostDL4Mic.\" Nature Communications (2021).'\n", " pdf.multi_cell(190, 5, txt = ref_1, align='L')\n", @@ -800,7 +803,7 @@ "\n", "# mount user's Google Drive to Google Colab.\n", "from google.colab import drive\n", - "drive.mount('/content/gdrive')" + "drive.mount(base_path + '/gdrive')" ] }, { @@ -953,25 +956,25 @@ "\n", "\n", "#Here we split the training dataset between training and validation\n", - "# Everything is copied in the /Content Folder\n", - "Training_source_temp = \"/content/training_source\"\n", + "# Everything is copied in the 'base_path' Folder\n", + "Training_source_temp = base_path + \"/training_source\"\n", "\n", "if os.path.exists(Training_source_temp):\n", " shutil.rmtree(Training_source_temp)\n", "os.makedirs(Training_source_temp)\n", "\n", - "Training_target_temp = \"/content/training_target\"\n", + "Training_target_temp = base_path + \"/training_target\"\n", "if os.path.exists(Training_target_temp):\n", " shutil.rmtree(Training_target_temp)\n", "os.makedirs(Training_target_temp)\n", "\n", - "Validation_source_temp = \"/content/validation_source\"\n", + "Validation_source_temp = base_path + \"/validation_source\"\n", "\n", "if os.path.exists(Validation_source_temp):\n", " shutil.rmtree(Validation_source_temp)\n", "os.makedirs(Validation_source_temp)\n", "\n", - "Validation_target_temp = \"/content/validation_target\"\n", + "Validation_target_temp = base_path + \"/validation_target\"\n", "if os.path.exists(Validation_target_temp):\n", " shutil.rmtree(Validation_target_temp)\n", "os.makedirs(Validation_target_temp)\n", @@ -1015,7 +1018,7 @@ "plt.imshow(y, interpolation='nearest', vmin=0, vmax=1, cmap='viridis')\n", "plt.title('Training target')\n", "plt.axis('off');\n", - "plt.savefig('/content/TrainingDataExample_DenoiSeg.png',bbox_inches='tight',pad_inches=0)\n", + "plt.savefig(base_path + '/TrainingDataExample_DenoiSeg.png',bbox_inches='tight',pad_inches=0)\n", "\n" ] }, @@ -1113,7 +1116,7 @@ "\n", " if pretrained_model_choice == \"Model_name\":\n", " pretrained_model_name = \"Model_name\"\n", - " pretrained_model_path = \"/content/\"+pretrained_model_name\n", + " pretrained_model_path = base_path + \"/\"+pretrained_model_name\n", " print(\"Downloading the 2D_Demo_Model_from_Stardist_2D_paper\")\n", " if os.path.exists(pretrained_model_path):\n", " shutil.rmtree(pretrained_model_path)\n", diff --git a/Colab_notebooks/Detectron2_2D_ZeroCostDL4Mic.ipynb b/Colab_notebooks/Detectron2_2D_ZeroCostDL4Mic.ipynb index a1e9c507..2bf83637 100644 --- a/Colab_notebooks/Detectron2_2D_ZeroCostDL4Mic.ipynb +++ b/Colab_notebooks/Detectron2_2D_ZeroCostDL4Mic.ipynb @@ -1 +1 @@ -{"cells":[{"cell_type":"markdown","metadata":{"id":"JhcOyBQjR54F"},"source":["#**This notebook is in beta**\n","Expect some instabilities and bugs.\n","\n","**Currently missing features include:**\n","\n","- Augmentation cannot be disabled\n","- Exported results include only a simple CSV file. More options will be included in the next releases\n","- Training and QC reports are not generated\n"]},{"cell_type":"markdown","metadata":{"id":"IkSguVy8Xv83"},"source":["# **Detectron2 (2D)**\n","\n"," Detectron2 is a deep-learning method designed to perform object detection and classification of objects in images. Detectron2 is Facebook AI Research's next generation software system that implements state-of-the-art object detection algorithms. It is a ground-up rewrite of the previous version, Detectron, and it originates from maskrcnn-benchmark. 
More information on Detectron2 can be found on the Detectron2 github pages (https://github.com/facebookresearch/detectron2).\n","\n","\n","\n","**This particular notebook enables object detection and classification on 2D images given ground truth bounding boxes. If you are interested in image segmentation, you should use our U-net or Stardist notebooks instead.**\n","\n","---\n","\n","*Disclaimer*:\n","\n","This notebook is part of the Zero-Cost Deep-Learning to Enhance Microscopy project (https://github.com/HenriquesLab/DeepLearning_Collab/wiki). Jointly developed by the Jacquemet (link to https://cellmig.org/) and Henriques (https://henriqueslab.github.io/) laboratories.\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"NDICs5NxYEWP"},"source":["# **License**\n","\n","---"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"R575GX8cX2aP"},"outputs":[],"source":["#@markdown ##Double click to see the license information\n","\n","#------------------------- LICENSE FOR ZeroCostDL4Mic------------------------------------\n","#This ZeroCostDL4Mic notebook is distributed under the MIT licence\n","\n","\n","\n","#------------------------- LICENSE FOR CycleGAN ------------------------------------\n","\n","\n","#Apache License\n","#Version 2.0, January 2004\n","#http://www.apache.org/licenses/\n","\n","#TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n","\n","#1. Definitions.\n","\n","#\"License\" shall mean the terms and conditions for use, reproduction,\n","#and distribution as defined by Sections 1 through 9 of this document.\n","\n","#\"Licensor\" shall mean the copyright owner or entity authorized by\n","#the copyright owner that is granting the License.\n","\n","#\"Legal Entity\" shall mean the union of the acting entity and all\n","#other entities that control, are controlled by, or are under common\n","#control with that entity. For the purposes of this definition,\n","#\"control\" means (i) the power, direct or indirect, to cause the\n","#direction or management of such entity, whether by contract or\n","#otherwise, or (ii) ownership of fifty percent (50%) or more of the\n","#outstanding shares, or (iii) beneficial ownership of such entity.\n","\n","#\"You\" (or \"Your\") shall mean an individual or Legal Entity\n","#exercising permissions granted by this License.\n","\n","#\"Source\" form shall mean the preferred form for making modifications,\n","#including but not limited to software source code, documentation\n","#source, and configuration files.\n","\n","#\"Object\" form shall mean any form resulting from mechanical\n","#transformation or translation of a Source form, including but\n","#not limited to compiled object code, generated documentation,\n","#and conversions to other media types.\n","\n","#\"Work\" shall mean the work of authorship, whether in Source or\n","#Object form, made available under the License, as indicated by a\n","#copyright notice that is included in or attached to the work\n","#(an example is provided in the Appendix below).\n","\n","#\"Derivative Works\" shall mean any work, whether in Source or Object\n","#form, that is based on (or derived from) the Work and for which the\n","#editorial revisions, annotations, elaborations, or other modifications\n","#represent, as a whole, an original work of authorship. 
For the purposes\n","#of this License, Derivative Works shall not include works that remain\n","#separable from, or merely link (or bind by name) to the interfaces of,\n","#the Work and Derivative Works thereof.\n","\n","#\"Contribution\" shall mean any work of authorship, including\n","#the original version of the Work and any modifications or additions\n","#to that Work or Derivative Works thereof, that is intentionally\n","#submitted to Licensor for inclusion in the Work by the copyright owner\n","#or by an individual or Legal Entity authorized to submit on behalf of\n","#the copyright owner. For the purposes of this definition, \"submitted\"\n","#means any form of electronic, verbal, or written communication sent\n","#to the Licensor or its representatives, including but not limited to\n","#communication on electronic mailing lists, source code control systems,\n","#and issue tracking systems that are managed by, or on behalf of, the\n","#Licensor for the purpose of discussing and improving the Work, but\n","#excluding communication that is conspicuously marked or otherwise\n","#designated in writing by the copyright owner as \"Not a Contribution.\"\n","\n","#\"Contributor\" shall mean Licensor and any individual or Legal Entity\n","#on behalf of whom a Contribution has been received by Licensor and\n","#subsequently incorporated within the Work.\n","\n","#2. Grant of Copyright License. Subject to the terms and conditions of\n","#this License, each Contributor hereby grants to You a perpetual,\n","#worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n","#copyright license to reproduce, prepare Derivative Works of,\n","#publicly display, publicly perform, sublicense, and distribute the\n","#Work and such Derivative Works in Source or Object form.\n","\n","#3. Grant of Patent License. Subject to the terms and conditions of\n","#this License, each Contributor hereby grants to You a perpetual,\n","#worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n","#(except as stated in this section) patent license to make, have made,\n","#use, offer to sell, sell, import, and otherwise transfer the Work,\n","#where such license applies only to those patent claims licensable\n","#by such Contributor that are necessarily infringed by their\n","#Contribution(s) alone or by combination of their Contribution(s)\n","#with the Work to which such Contribution(s) was submitted. If You\n","#institute patent litigation against any entity (including a\n","#cross-claim or counterclaim in a lawsuit) alleging that the Work\n","#or a Contribution incorporated within the Work constitutes direct\n","#or contributory patent infringement, then any patent licenses\n","#granted to You under this License for that Work shall terminate\n","#as of the date such litigation is filed.\n","\n","#4. Redistribution. 
You may reproduce and distribute copies of the\n","#Work or Derivative Works thereof in any medium, with or without\n","#modifications, and in Source or Object form, provided that You\n","#meet the following conditions:\n","\n","#(a) You must give any other recipients of the Work or\n","#Derivative Works a copy of this License; and\n","\n","#(b) You must cause any modified files to carry prominent notices\n","#stating that You changed the files; and\n","\n","#(c) You must retain, in the Source form of any Derivative Works\n","#that You distribute, all copyright, patent, trademark, and\n","#attribution notices from the Source form of the Work,\n","#excluding those notices that do not pertain to any part of\n","#the Derivative Works; and\n","\n","#(d) If the Work includes a \"NOTICE\" text file as part of its\n","#distribution, then any Derivative Works that You distribute must\n","#include a readable copy of the attribution notices contained\n","#within such NOTICE file, excluding those notices that do not\n","#pertain to any part of the Derivative Works, in at least one\n","#of the following places: within a NOTICE text file distributed\n","#as part of the Derivative Works; within the Source form or\n","#documentation, if provided along with the Derivative Works; or,\n","#within a display generated by the Derivative Works, if and\n","#wherever such third-party notices normally appear. The contents\n","#of the NOTICE file are for informational purposes only and\n","#do not modify the License. You may add Your own attribution\n","#notices within Derivative Works that You distribute, alongside\n","#or as an addendum to the NOTICE text from the Work, provided\n","#that such additional attribution notices cannot be construed\n","#as modifying the License.\n","\n","#You may add Your own copyright statement to Your modifications and\n","#may provide additional or different license terms and conditions\n","#for use, reproduction, or distribution of Your modifications, or\n","#for any such Derivative Works as a whole, provided Your use,\n","#reproduction, and distribution of the Work otherwise complies with\n","#the conditions stated in this License.\n","\n","#5. Submission of Contributions. Unless You explicitly state otherwise,\n","#any Contribution intentionally submitted for inclusion in the Work\n","#by You to the Licensor shall be under the terms and conditions of\n","#this License, without any additional terms or conditions.\n","#Notwithstanding the above, nothing herein shall supersede or modify\n","#the terms of any separate license agreement you may have executed\n","#with Licensor regarding such Contributions.\n","\n","#6. Trademarks. This License does not grant permission to use the trade\n","#names, trademarks, service marks, or product names of the Licensor,\n","#except as required for reasonable and customary use in describing the\n","#origin of the Work and reproducing the content of the NOTICE file.\n","\n","#7. Disclaimer of Warranty. Unless required by applicable law or\n","#agreed to in writing, Licensor provides the Work (and each\n","#Contributor provides its Contributions) on an \"AS IS\" BASIS,\n","#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n","#implied, including, without limitation, any warranties or conditions\n","#of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n","#PARTICULAR PURPOSE. 
You are solely responsible for determining the\n","#appropriateness of using or redistributing the Work and assume any\n","#risks associated with Your exercise of permissions under this License.\n","\n","#8. Limitation of Liability. In no event and under no legal theory,\n","#whether in tort (including negligence), contract, or otherwise,\n","#unless required by applicable law (such as deliberate and grossly\n","#negligent acts) or agreed to in writing, shall any Contributor be\n","#liable to You for damages, including any direct, indirect, special,\n","#incidental, or consequential damages of any character arising as a\n","#result of this License or out of the use or inability to use the\n","#Work (including but not limited to damages for loss of goodwill,\n","#work stoppage, computer failure or malfunction, or any and all\n","#other commercial damages or losses), even if such Contributor\n","#has been advised of the possibility of such damages.\n","\n","#9. Accepting Warranty or Additional Liability. While redistributing\n","#the Work or Derivative Works thereof, You may choose to offer,\n","#and charge a fee for, acceptance of support, warranty, indemnity,\n","#or other liability obligations and/or rights consistent with this\n","#License. However, in accepting such obligations, You may act only\n","#on Your own behalf and on Your sole responsibility, not on behalf\n","#of any other Contributor, and only if You agree to indemnify,\n","#defend, and hold each Contributor harmless for any liability\n","#incurred by, or claims asserted against, such Contributor by reason\n","#of your accepting any such warranty or additional liability.\n","\n","#END OF TERMS AND CONDITIONS\n","\n","#APPENDIX: How to apply the Apache License to your work.\n","\n","#To apply the Apache License to your work, attach the following\n","#boilerplate notice, with the fields enclosed by brackets \"[]\"\n","#replaced with your own identifying information. (Don't include\n","#the brackets!) The text should be enclosed in the appropriate\n","#comment syntax for the file format. 
We also recommend that a\n","#file or class name and description of purpose be included on the\n","#same \"printed page\" as the copyright notice for easier\n","#identification within third-party archives.\n","\n","#Copyright [yyyy] [name of copyright owner]\n","\n","\n","#Licensed under the Apache License, Version 2.0 (the \"License\");\n","#you may not use this file except in compliance with the License.\n","#You may obtain a copy of the License at\n","\n","#http://www.apache.org/licenses/LICENSE-2.0\n","\n","#Unless required by applicable law or agreed to in writing, software\n","#distributed under the License is distributed on an \"AS IS\" BASIS,\n","#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n","#See the License for the specific language governing permissions and\n","#limitations under the License."]},{"cell_type":"markdown","metadata":{"id":"jWAz2i7RdxUV"},"source":["# **How to use this notebook?**\n","\n","---\n","\n","Video describing how to use our notebooks are available on youtube:\n"," - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run through of the workflow to obtain the notebooks and the provided test datasets as well as a common use of the notebook\n"," - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook\n","\n","\n","---\n","###**Structure of a notebook**\n","\n","The notebook contains two types of cell: \n","\n","**Text cells** provide information and can be modified by douple-clicking the cell. You are currently reading the text cell. You can create a new text by clicking `+ Text`.\n","\n","**Code cells** contain code and the code can be modfied by selecting the cell. To execute the cell, move your cursor on the `[ ]`-mark on the left side of the cell (play button appears). Click to execute the cell. After execution is done the animation of play button stops. You can create a new coding cell by clicking `+ Code`.\n","\n","---\n","###**Table of contents, Code snippets** and **Files**\n","\n","On the top left side of the notebook you find three tabs which contain from top to bottom:\n","\n","*Table of contents* = contains structure of the notebook. Click the content to move quickly between sections.\n","\n","*Code snippets* = contain examples how to code certain tasks. You can ignore this when using this notebook.\n","\n","*Files* = contain all available files. After mounting your google drive (see section 1.) you will find your files and folders here. \n","\n","**Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive-button; your Google Drive is connected in section 1.2.\n","\n","**Note:** The \"sample data\" in \"Files\" contains default files. Do not upload anything in here!\n","\n","---\n","###**Making changes to the notebook**\n","\n","**You can make a copy** of the notebook and save it to your Google Drive. To do this click file -> save a copy in drive.\n","\n","To **edit a cell**, double click on the text. This will show you either the source code (in code cells) or the source text (in text cells).\n","You can use the `#`-mark in code cells to comment out parts of the code. This allows you to keep the original code piece in the cell as a comment."]},{"cell_type":"markdown","metadata":{"id":"gKDLkLWUd-YX"},"source":["#**0. 
Before getting started**\n","---\n"," Preparing the dataset carefully is essential to make this Detectron2 notebook work. This model requires as input a set of images and as target a list of annotation files in Pascal VOC format. The annotation files should have the exact same name as the input files, except with an .xml instead of the .jpg extension. The annotation files contain the class labels and all bounding boxes for the objects for each image in your dataset. Most datasets will give the option of saving the annotations in this format or using software for hand-annotations will automatically save the annotations in this format. \n","\n"," If you want to assemble your own dataset we recommend using the open source https://www.makesense.ai/ resource. You can follow our instructions on how to label your dataset with this tool on our [wiki](https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki/Object-Detection-(YOLOv2)).\n","\n","**We strongly recommend that you generate extra paired images. These images can be used to assess the quality of your trained model (Quality control dataset)**. The quality control assessment can be done directly in this notebook.\n","\n"," **Additionally, the corresponding input and output files need to have the same name**.\n","\n"," Please note that you currently can **only use .png files!**\n","\n","\n","Here's a common data structure that can work:\n","* Experiment A\n"," - **Training dataset**\n"," - Input images (Training_source)\n"," - img_1.png, img_2.png, ...\n"," - High SNR images (Training_source_annotations)\n"," - img_1.xml, img_2.xml, ...\n"," - **Quality control dataset**\n"," - Input images\n"," - img_1.png, img_2.png\n"," - High SNR images\n"," - img_1.xml, img_2.xml\n"," - **Data to be predicted**\n"," - **Results**\n","\n","---\n","**Important note**\n","\n","- If you wish to **Train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.\n","\n","- If you wish to **Evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.\n","\n","- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.\n","---"]},{"cell_type":"markdown","metadata":{"id":"n4yWFoJNnoin"},"source":["# **1. Install Detectron2 and dependencies**\n","---"]},{"cell_type":"markdown","metadata":{"id":"yg1vZe88JEyk"},"source":["## **1.1. 
Install key dependencies**\n","---\n"," "]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"Tw1Usk1iPvRQ"},"outputs":[],"source":[" #@markdown ##Install dependencies and Detectron2\n","\n","from builtins import any as b_any\n","\n","def get_requirements_path():\n"," # Store requirements file in 'contents' directory\n"," current_dir = os.getcwd()\n"," dir_count = current_dir.count('/') - 1\n"," path = '../' * (dir_count) + 'requirements.txt'\n"," return path\n","\n","def filter_files(file_list, filter_list):\n"," filtered_list = []\n"," for fname in file_list:\n"," if b_any(fname.split('==')[0] in s for s in filter_list):\n"," filtered_list.append(fname)\n"," return filtered_list\n","\n","def build_requirements_file(before, after):\n"," path = get_requirements_path()\n","\n"," # Exporting requirements.txt for local run\n"," !pip freeze > $path\n","\n"," # Get minimum requirements file\n"," df = pd.read_csv(path)\n"," mod_list = [m.split('.')[0] for m in after if not m in before]\n"," req_list_temp = df.values.tolist()\n"," req_list = [x[0] for x in req_list_temp]\n","\n"," # Replace with package name and handle cases where import name is different to module name\n"," mod_name_list = [['sklearn', 'scikit-learn'], ['skimage', 'scikit-image']]\n"," mod_replace_list = [[x[1] for x in mod_name_list] if s in [x[0] for x in mod_name_list] else s for s in mod_list]\n"," filtered_list = filter_files(req_list, mod_replace_list)\n","\n"," file=open(path,'w')\n"," for item in filtered_list:\n"," file.writelines(item)\n","\n"," file.close()\n","\n","import sys\n","before = [str(m) for m in sys.modules]\n","\n","# install dependencies\n","#!pip install -U torch torchvision cython\n","!pip install -U 'git+https://github.com/facebookresearch/fvcore.git' 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'\n","import torch, torchvision\n","import os\n","import pandas as pd\n","torch.__version__\n"," \n","!git clone https://github.com/facebookresearch/detectron2 detectron2_repo\n","!pip install -e detectron2_repo\n","\n","!pip install wget\n","\n","#Force session restart\n","exit(0)\n","\n","# Build requirements file for local run\n","after = [str(m) for m in sys.modules]\n","build_requirements_file(before, after)"]},{"cell_type":"markdown","metadata":{"id":"xhWNIu6cf5G8"},"source":["## **1.2. Restart your runtime**\n","---\n","\n","\n","\n","** Ignore the following message error message. Your Runtime has automatically restarted. This is normal.**\n","\n","\"\"
\n"]},{"cell_type":"markdown","metadata":{"id":"5nXTBntzKRWu"},"source":["## **1.3. Load key dependencies**\n","---\n"," "]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"fq21zJVFNASx"},"outputs":[],"source":["Notebook_version = '1.13'\n","Network = 'Detectron 2D'\n","\n","\n","#@markdown ##Play this cell to load the required dependencies\n","import wget\n","# Some basic setup: \n","import detectron2\n","from detectron2.utils.logger import setup_logger\n","setup_logger()\n"," \n","# import some common libraries\n","import numpy as np\n","import os, json, cv2, random\n","from google.colab.patches import cv2_imshow\n"," \n","import yaml\n"," \n","#Download the script to convert XML into COCO\n"," \n","wget.download(\"https://github.com/HenriquesLab/ZeroCostDL4Mic/raw/master/Tools/voc2coco.py\", \"/content\")\n"," \n"," \n","# import some common detectron2 utilities\n","from detectron2 import model_zoo\n","from detectron2.engine import DefaultPredictor\n","from detectron2.config import get_cfg\n","from detectron2.utils.visualizer import Visualizer\n","from detectron2.data import MetadataCatalog, DatasetCatalog\n","from detectron2.utils.visualizer import ColorMode\n","\n","from detectron2.data import DatasetCatalog, MetadataCatalog, build_detection_test_loader\n","from datetime import datetime\n","from detectron2.data.catalog import Metadata\n","\n","from detectron2.config import get_cfg\n","from detectron2.data import DatasetCatalog, MetadataCatalog, build_detection_test_loader\n","from detectron2.evaluation import COCOEvaluator, inference_on_dataset\n","from detectron2.engine import DefaultTrainer\n","from detectron2.data.datasets import register_coco_instances\n","from detectron2.utils.visualizer import ColorMode\n","import glob\n","from detectron2.checkpoint import Checkpointer\n","from detectron2.config import get_cfg\n","import os\n"," \n"," \n","# ------- Common variable to all ZeroCostDL4Mic notebooks -------\n","import numpy as np\n","from matplotlib import pyplot as plt\n","import urllib\n","import os, random\n","import shutil \n","import zipfile\n","from tifffile import imread, imsave\n","import time\n","import sys\n"," \n","from pathlib import Path\n","import pandas as pd\n","import csv\n","from glob import glob\n","from scipy import signal\n","from scipy import ndimage\n","from skimage import io\n","from sklearn.linear_model import LinearRegression\n","from skimage.util import img_as_uint\n","import matplotlib as mpl\n","from skimage.metrics import structural_similarity\n","from skimage.metrics import peak_signal_noise_ratio as psnr\n","from astropy.visualization import simple_norm\n","from skimage import img_as_float32\n"," \n","# Colors for the warning messages\n","class bcolors:\n"," WARNING = '\\033[31m'\n","W = '\\033[0m' # white (normal)\n","R = '\\033[31m' # red\n"," \n","#Disable some of the tensorflow warnings\n","import warnings\n","warnings.filterwarnings(\"ignore\")\n"," \n"," \n","from detectron2.engine import DefaultTrainer\n","from detectron2.evaluation import COCOEvaluator\n"," \n","class CocoTrainer(DefaultTrainer):\n"," \n"," @classmethod\n"," def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n"," \n"," if output_folder is None:\n"," os.makedirs(\"coco_eval\", exist_ok=True)\n"," output_folder = \"coco_eval\"\n"," \n"," return COCOEvaluator(dataset_name, cfg, False, output_folder)\n"," \n"," \n"," \n","print(\"Librairies loaded\")\n"," \n","# Check if this is the latest version of the 
notebook\n","All_notebook_versions = pd.read_csv(\"https://raw.githubusercontent.com/HenriquesLab/ZeroCostDL4Mic/master/Colab_notebooks/Latest_Notebook_versions.csv\", dtype=str)\n","print('Notebook version: '+Notebook_version)\n","Latest_Notebook_version = All_notebook_versions[All_notebook_versions[\"Notebook\"] == Network]['Version'].iloc[0]\n","print('Latest notebook version: '+Latest_Notebook_version)\n","if Notebook_version == Latest_Notebook_version:\n"," print(\"This notebook is up-to-date.\")\n","else:\n"," print(bcolors.WARNING +\"A new version of this notebook has been released. We recommend that you download it at https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki\")\n","\n","\n"," \n","#Failsafes\n","cell_ran_prediction = 0\n","cell_ran_training = 0\n","cell_ran_QC_training_dataset = 0\n","cell_ran_QC_QC_dataset = 0"]},{"cell_type":"markdown","metadata":{"id":"cbTknRcviyT7"},"source":["# **2. Initialise the Colab session**\n","\n","\n","\n","\n","---\n","\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"DMNHVZfHmbKb"},"source":["## **2.1. Check for GPU access**\n","---\n","\n","By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:\n","\n","Go to **Runtime -> Change the Runtime type**\n","\n","**Runtime type: Python 3** *(Python 3 is programming language in which this program is written)*\n","\n","**Accelerator: GPU** *(Graphics processing unit)*\n"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"h5i5CS2bSmZr"},"outputs":[],"source":["#@markdown ##Run this cell to check if you have GPU access\n","#%tensorflow_version 1.x\n","\n","\n","import tensorflow as tf\n","if tf.test.gpu_device_name()=='':\n"," print('You do not have GPU access.') \n"," print('Did you change your runtime ?') \n"," print('If the runtime setting is correct then Google did not allocate a GPU for your session')\n"," print('Expect slow performance. To access GPU try reconnecting later')\n","\n","else:\n"," print('You have GPU access')\n"," !nvidia-smi"]},{"cell_type":"markdown","metadata":{"id":"n3B3meGTbYVi"},"source":["## **2.2. Mount your Google Drive**\n","---\n"," To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook.\n","\n"," Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste into the cell and press enter. This will give Colab access to the data on the drive. \n","\n"," Once this is done, your data are available in the **Files** tab on the top left of notebook."]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"01Djr8v-5pPk"},"outputs":[],"source":["#@markdown ##Play the cell to connect your Google Drive to Colab\n"," \n","#@markdown * Click on the URL. \n"," \n","#@markdown * Sign in your Google Account. \n"," \n","#@markdown * Copy the authorization code. \n"," \n","#@markdown * Enter the authorization code. \n"," \n","#@markdown * Click on \"Files\" site on the right. Refresh the site. Your Google Drive folder should now be available here as \"drive\". 
\n"," \n","# mount user's Google Drive to Google Colab.\n","from google.colab import drive\n","drive.mount('/content/gdrive')"]},{"cell_type":"markdown","metadata":{"id":"xm5YEhKq-Hse"},"source":["** If you cannot see your files, reactivate your session by connecting to your hosted runtime.** \n","\n","\n","\"Example
Connect to a hosted runtime.
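Not part of the original notebook: before filling in the paths in section 3.1, it can help to confirm that every `.png` image in your source folder has a matching Pascal VOC `.xml` annotation with the same base name, since the notebook relies on that pairing. A minimal sketch follows; the two folder paths are hypothetical placeholders for your own `Training_source` / `Training_target` folders.

```python
# Hedged sketch: report unpaired images/annotations (paths are placeholders, not from the notebook).
import os

Training_source = "/content/gdrive/MyDrive/Experiment_A/Training/images"       # hypothetical path
Training_target = "/content/gdrive/MyDrive/Experiment_A/Training/annotations"  # hypothetical path

images = {os.path.splitext(f)[0] for f in os.listdir(Training_source) if f.lower().endswith(".png")}
annots = {os.path.splitext(f)[0] for f in os.listdir(Training_target) if f.lower().endswith(".xml")}

print("Images without an .xml annotation:", sorted(images - annots))
print("Annotations without a .png image :", sorted(annots - images))
```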
"]},{"cell_type":"markdown","metadata":{"id":"iwjra6kMKmUA"},"source":["# **3. Select your parameters and paths**\n"]},{"cell_type":"markdown","metadata":{"id":"Kbn9_JdqnNnK"},"source":["## **3.1. Setting main training parameters**\n","---\n"," "]},{"cell_type":"markdown","metadata":{"id":"CB6acvUFtWqd"},"source":[" **Paths for training, predictions and results**\n","\n","**`Training_source:`, `Training_target`:** These are the paths to your folders containing the Training_source and the annotation data respectively. To find the paths of the folders containing the respective datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below.\n","\n","**`labels`:** Input the name of the differentes labels used to annotate your dataset (separated by a comma).\n","\n","**`model_name`:** Use only my_model -style, not my-model (Use \"_\" not \"-\"). Do not use spaces in the name. Avoid using the name of an existing model (saved in the same folder) as it will be overwritten.\n","\n","**`model_path`**: Enter the path where your model will be saved once trained (for instance your result folder).\n","\n","**Training Parameters**\n","\n","**`number_of_iteration`:** Input how many iterations to use to train the network. Initial results can be observed using 1000 iterations but consider using 5000 or more iterations to train your models. **Default value: 2000**\n"," \n","\n","**Advanced Parameters - experienced users only**\n","\n","**`batch_size:`** This parameter defines the number of patches seen in each training step. Noise2Void requires a large batch size for stable training. Reduce this parameter if your GPU runs out of memory. **Default value: 128**\n","\n","**`number_of_steps`:** Define the number of training steps by epoch. By default this parameter is calculated so that each image / patch is seen at least once per epoch. **Default value: Number of patch / batch_size**\n","\n","**`percentage_validation`:** Input the percentage of your training dataset you want to use to validate the network during the training. **Default value: 10**\n","\n","**`initial_learning_rate`:** Input the initial value to be used as learning rate. 
**Default value: 0.0001**\n"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"ewpNJ_I0Mv47"},"outputs":[],"source":["# create DataGenerator-object.\n","\n","\n","#@markdown ###Path to training image(s): \n","Training_source = \"\" #@param {type:\"string\"}\n","Training_target = \"\" #@param {type:\"string\"}\n","\n","#@markdown ###Labels\n","#@markdown Input the name of the differentes labels present in your training dataset separated by a comma\n","labels = \"\" #@param {type:\"string\"}\n","\n","\n","#@markdown ### Model name and path:\n","model_name = \"\" #@param {type:\"string\"}\n","model_path = \"\" #@param {type:\"string\"}\n","\n","full_model_path = model_path+'/'+model_name+'/'\n","\n","\n","#@markdown ###Training Parameters\n","#@markdown Number of iterations:\n","number_of_iteration = 2000#@param {type:\"number\"}\n","\n","\n","#Here we store the informations related to our labels\n","\n","list_of_labels = labels.split(\", \")\n","with open('/content/labels.txt', 'w') as f:\n"," for item in list_of_labels:\n"," print(item, file=f)\n","\n","number_of_labels = len(list_of_labels)\n","\n","\n","#@markdown ###Advanced Parameters\n","\n","Use_Default_Advanced_Parameters = True#@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please input:\n","batch_size = 4#@param {type:\"number\"}\n","percentage_validation = 10#@param {type:\"number\"}\n","initial_learning_rate = 0.001 #@param {type:\"number\"}\n","\n","\n","if (Use_Default_Advanced_Parameters): \n"," print(\"Default advanced parameters enabled\")\n"," batch_size = 4\n"," percentage_validation = 10\n"," initial_learning_rate = 0.001\n","\n","# Here we disable pre-trained model by default (in case the next cell is not ran)\n","Use_pretrained_model = True\n","\n","# Here we disable data augmentation by default (in case the cell is not ran)\n","\n","Use_Data_augmentation = True\n","\n","# Here we split the data between training and validation\n","# Here we count the number of files in the training target folder\n","Filelist = os.listdir(Training_target)\n","number_files = len(Filelist)\n","\n","File_for_validation = int((number_files)/percentage_validation)+1\n","\n","#Here we split the training dataset between training and validation\n","# Everything is copied in the /Content Folder\n","\n","Training_source_temp = \"/content/training_source\"\n","\n","if os.path.exists(Training_source_temp):\n"," shutil.rmtree(Training_source_temp)\n","os.makedirs(Training_source_temp)\n","\n","Training_target_temp = \"/content/training_target\"\n","if os.path.exists(Training_target_temp):\n"," shutil.rmtree(Training_target_temp)\n","os.makedirs(Training_target_temp)\n","\n","Validation_source_temp = \"/content/validation_source\"\n","\n","if os.path.exists(Validation_source_temp):\n"," shutil.rmtree(Validation_source_temp)\n","os.makedirs(Validation_source_temp)\n","\n","Validation_target_temp = \"/content/validation_target\"\n","if os.path.exists(Validation_target_temp):\n"," shutil.rmtree(Validation_target_temp)\n","os.makedirs(Validation_target_temp)\n","\n","list_source = os.listdir(os.path.join(Training_source))\n","list_target = os.listdir(os.path.join(Training_target))\n","\n","#Move files into the temporary source and target directories:\n","\n"," \n","for f in os.listdir(os.path.join(Training_source)):\n"," shutil.copy(Training_source+\"/\"+f, Training_source_temp+\"/\"+f)\n","\n","for p in os.listdir(os.path.join(Training_target)):\n"," shutil.copy(Training_target+\"/\"+p, 
Training_target_temp+\"/\"+p)\n","\n","\n","list_source_temp = os.listdir(os.path.join(Training_source_temp))\n","list_target_temp = os.listdir(os.path.join(Training_target_temp))\n","\n","\n","#Here we move images to be used for validation\n","for i in range(File_for_validation):\n","\n"," name = list_source_temp[i]\n"," shutil.move(Training_source_temp+\"/\"+name, Validation_source_temp+\"/\"+name)\n","\n"," shortname_no_extension = name[:-4]\n","\n"," shutil.move(Training_target_temp+\"/\"+shortname_no_extension+\".xml\", Validation_target_temp+\"/\"+shortname_no_extension+\".xml\")\n","\n","# Here we convert the XML files into COCO format to be loaded in detectron2\n","\n","#First we need to create list of labels to generate the json dictionaries\n","\n","list_source_training_temp = os.listdir(os.path.join(Training_source_temp))\n","list_source_validation_temp = os.listdir(os.path.join(Validation_source_temp))\n","\n","\n","name_no_extension_training = []\n","for n in list_source_training_temp:\n"," name_no_extension_training.append(os.path.splitext(n)[0])\n","\n","name_no_extension_validation = []\n","for n in list_source_validation_temp:\n"," name_no_extension_validation.append(os.path.splitext(n)[0])\n","\n","#Save the list of labels as text file\n","\n","with open('/content/training_files.txt', 'w') as f:\n"," for item in name_no_extension_training:\n"," print(item, end='\\n', file=f)\n","\n","with open('/content/validation_files.txt', 'w') as f:\n"," for item in name_no_extension_validation:\n"," print(item, end='\\n', file=f)\n","\n","\n","file_output_training = Training_target_temp+\"/output.json\"\n","file_output_validation = Validation_target_temp+\"/output.json\"\n","\n","\n","os.chdir(\"/content\")\n","!python voc2coco.py --ann_dir \"$Training_target_temp\" --output \"$file_output_training\" --ann_ids \"/content/training_files.txt\" --labels \"/content/labels.txt\" --ext xml\n","!python voc2coco.py --ann_dir \"$Validation_target_temp\" --output \"$file_output_validation\" --ann_ids \"/content/validation_files.txt\" --labels \"/content/labels.txt\" --ext xml\n","\n","\n","os.chdir(\"/\")\n","\n","#Here we load the dataset to detectron2\n","if cell_ran_training == 0:\n"," from detectron2.data.datasets import register_coco_instances\n"," register_coco_instances(\"my_dataset_train\", {}, Training_target_temp+\"/output.json\", Training_source_temp)\n"," register_coco_instances(\"my_dataset_val\", {}, Validation_target_temp+\"/output.json\", Validation_source_temp)\n","\n","\n","#visualize training data\n","my_dataset_train_metadata = MetadataCatalog.get(\"my_dataset_train\")\n","\n","dataset_dicts = DatasetCatalog.get(\"my_dataset_train\")\n","\n","import random\n","from detectron2.utils.visualizer import Visualizer\n","\n","for d in random.sample(dataset_dicts, 1):\n"," img = cv2.imread(d[\"file_name\"])\n"," visualizer = Visualizer(img[:, :, ::-1], metadata=my_dataset_train_metadata, instance_mode=ColorMode.SEGMENTATION, scale=0.8)\n"," vis = visualizer.draw_dataset_dict(d)\n"," cv2_imshow(vis.get_image()[:, :, ::-1])\n","\n","# failsafe\n","cell_ran_training = 1"]},{"cell_type":"markdown","metadata":{"id":"STDOuNOFsTTJ"},"source":["## **3.2. Data augmentation** \n","---\n",""]},{"cell_type":"markdown","metadata":{"id":"E4QW-tvYsWhX"},"source":["Data augmentation is currently enabled by default in this notebook. The option to disable data augmentation is not yet avaialble.\n"," "]},{"cell_type":"markdown","metadata":{"id":"W6pZg0KVnPzf"},"source":["\n","## **3.3. 
Using weights from a pre-trained model as initial weights**\n","---\n"," Here, you can set the the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a Detectron2 model**. \n","\n"," "]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"l-EDcv3Wyvqb"},"outputs":[],"source":["# @markdown ##Loading weights from a pre-trained network\n","\n","Use_pretrained_model = True #@param {type:\"boolean\"}\n","\n","pretrained_model_choice = \"Faster R-CNN\" #@param [\"Faster R-CNN\",\"RetinaNet\", \"Model_from_file\"]\n","\n","#pretrained_model_choice = \"Faster R-CNN\" #@param [\"Faster R-CNN\", \"RetinaNet\", \"RPN & Fast R-CNN\", \"Model_from_file\"]\n","\n","\n","\n","#@markdown ###If you chose \"Model_from_file\", please provide the path to the model folder:\n","pretrained_model_path = \"\" #@param {type:\"string\"}\n","\n","# --------------------- Check if we load a previously trained model ------------------------\n","if Use_pretrained_model:\n","\n","# --------------------- Load the model from the choosen path ------------------------\n"," if pretrained_model_choice == \"Model_from_file\":\n"," h5_file_path = pretrained_model_path\n"," print('Weights found in:')\n"," print(h5_file_path)\n"," print('will be loaded prior to training.')\n","\n"," if not os.path.exists(h5_file_path) and Use_pretrained_model:\n"," print('WARNING pretrained model does not exist')\n"," h5_file_path = \"COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml\"\n"," print('The Faster R-CNN model will be used.')\n"," \n"," if pretrained_model_choice == \"Faster R-CNN\":\n"," h5_file_path = \"COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml\"\n"," print('The Faster R-CNN model will be used.')\n"," \n"," if pretrained_model_choice == \"RetinaNet\":\n"," h5_file_path = \"COCO-Detection/retinanet_R_101_FPN_3x.yaml\"\n"," print('The RetinaNet model will be used.')\n","\n"," if pretrained_model_choice == \"RPN & Fast R-CNN\":\n"," h5_file_path = \"COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml\"\n","\n","\n","if not Use_pretrained_model:\n"," h5_file_path = \"COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml\"\n"," print('The Faster R-CNN model will be used.')"]},{"cell_type":"markdown","metadata":{"id":"HLYcZR9gMv42"},"source":["#**4. Train the network**\n","---\n"]},{"cell_type":"markdown","metadata":{"id":"DapHLZBVMNBZ"},"source":["\n","## **4.1. Start Trainning**\n","---\n","\n","When playing the cell below you should see updates after each epoch (round). Network training can take some time.\n","\n","* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches. 
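Not part of the original notebook: a hedged sketch of how a run that hit the Colab time limit could be continued from its last checkpoint instead of restarting from the model-zoo weights. It assumes the `config.yaml` and checkpoint files written by the training cell are still in the model folder, that section 3.1 has been re-run so that `"my_dataset_train"` / `"my_dataset_val"` are registered again, and it reuses `full_model_path` (section 3.1) and the `CocoTrainer` class (section 1.3) defined earlier in this notebook.

```python
# Hedged sketch: resume an interrupted Detectron2 training run from its last checkpoint.
import os
from detectron2.config import get_cfg

cfg = get_cfg()
cfg.merge_from_file(os.path.join(full_model_path, "config.yaml"))  # config written by the training cell
cfg.OUTPUT_DIR = full_model_path                                   # folder holding last_checkpoint / model_*.pth

trainer = CocoTrainer(cfg)              # CocoTrainer is defined in section 1.3
trainer.resume_or_load(resume=True)     # resume=True picks up the last checkpoint in OUTPUT_DIR
trainer.train()                         # continues until cfg.SOLVER.MAX_ITER is reached
```

The same idea is what the critical note above refers to: because the checkpoint and the dumped config are kept in the model folder on Google Drive, a second (or third) Colab session can pick the schedule up where the previous one stopped.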
Another way circumvent this is to save the parameters of the model after training and start training again from this point.\n"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"Nft44VSLU8ZH"},"outputs":[],"source":["#@markdown ##Start training\n","\n","# Create the model folder\n","\n","if os.path.exists(full_model_path):\n"," shutil.rmtree(full_model_path)\n","os.makedirs(full_model_path)\n","\n","#Copy the label names in the model folder\n","shutil.copy(\"/content/labels.txt\", full_model_path+\"/\"+\"labels.txt\")\n","\n","#PDF export\n","#######################################\n","## MISSING \n","#######################################\n","#To be added\n","\n","start = time.time()\n","\n","#Load the config files\n","cfg = get_cfg()\n","\n","if pretrained_model_choice == \"Model_from_file\":\n"," cfg.merge_from_file(pretrained_model_path+\"/config.yaml\")\n","\n","if not pretrained_model_choice == \"Model_from_file\":\n"," cfg.merge_from_file(model_zoo.get_config_file(h5_file_path))\n","\n","cfg.DATASETS.TRAIN = (\"my_dataset_train\",)\n","cfg.DATASETS.TEST = (\"my_dataset_val\",)\n","cfg.OUTPUT_DIR= (full_model_path)\n","cfg.DATALOADER.NUM_WORKERS = 4\n","\n","if pretrained_model_choice == \"Model_from_file\":\n"," cfg.MODEL.WEIGHTS = pretrained_model_path+\"/model_final.pth\" # Let training initialize from model zoo\n","\n","if not pretrained_model_choice == \"Model_from_file\":\n"," cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(h5_file_path) # Let training initialize from model zoo\n","\n","cfg.SOLVER.IMS_PER_BATCH = int(batch_size)\n","cfg.SOLVER.BASE_LR = initial_learning_rate\n","\n","cfg.SOLVER.WARMUP_ITERS = 1000\n","cfg.SOLVER.MAX_ITER = int(number_of_iteration) #adjust up if val mAP is still rising, adjust down if overfit\n","cfg.SOLVER.STEPS = (1000, 1500)\n","cfg.SOLVER.GAMMA = 0.05\n","\n","cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512\n","\n","if pretrained_model_choice == \"Faster R-CNN\":\n"," cfg.MODEL.ROI_HEADS.NUM_CLASSES = (number_of_labels) \n","\n","if pretrained_model_choice == \"RetinaNet\":\n"," cfg.MODEL.RETINANET.NUM_CLASSES = (number_of_labels) \n","\n","cfg.TEST.EVAL_PERIOD = 500\n","trainer = CocoTrainer(cfg)\n","\n","trainer.resume_or_load(resume=False)\n","trainer.train()\n","\n","#Save the config file after trainning\n","config= cfg.dump() # print formatted configs\n","\n","file1 = open(full_model_path+\"/config.yaml\", 'w') \n"," \n","file1.writelines(config) \n","file1.close() #to change file access modes\n","\n","#Save the label file after trainning\n","\n","dt = time.time() - start\n","mins, sec = divmod(dt, 60) \n","hour, mins = divmod(mins, 60) \n","print(\"Time elapsed:\",hour, \"hour(s)\",mins,\"min(s)\",round(sec),\"sec(s)\")\n","\n"]},{"cell_type":"markdown","metadata":{"id":"Vd9igRYvSnTr"},"source":["## **4.2. Download your model(s) from Google Drive**\n","---\n","\n","Once training is complete, the trained model is automatically saved on your Google Drive, in the **model_path** folder that was selected in Section 3. It is however wise to download the folder as all data can be erased at the next training if using the same folder."]},{"cell_type":"markdown","metadata":{"id":"sTMDT1u7rK9g"},"source":["# **5. Evaluate your model**\n","---\n","\n","This section allows the user to perform important quality checks on the validity and generalisability of the trained model. 
Detectron 2 requires you to reload your training dataset in order to perform the quality control step.\n","\n","**We highly recommend to perform quality control on all newly trained models.**\n","\n"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"OVxLyPyPiv85"},"outputs":[],"source":["# model name and path\n","#@markdown ###Do you want to assess the model you just trained ?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder as well as the location of your training dataset:\n","\n","#@markdown ####Path to trained model to be assessed: \n","\n","QC_model_folder = \"\" #@param {type:\"string\"}\n","\n","#@markdown ####Path to the image(s) used for training: \n","Training_source = \"\" #@param {type:\"string\"}\n","Training_target = \"\" #@param {type:\"string\"}\n","\n","\n","#Here we define the loaded model name and path\n","QC_model_name = os.path.basename(QC_model_folder)\n","QC_model_path = os.path.dirname(QC_model_folder)\n","\n","\n","if (Use_the_current_trained_model): \n"," QC_model_name = model_name\n"," QC_model_path = model_path\n","\n","\n","full_QC_model_path = QC_model_path+'/'+QC_model_name+'/'\n","if os.path.exists(full_QC_model_path):\n"," print(\"The \"+QC_model_name+\" network will be evaluated\")\n","\n","else: \n"," print(bcolors.WARNING + '!! WARNING: The chosen model does not exist !!')\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n","\n","# Here we load the list of classes stored in the model folder\n","list_of_labels_QC =[]\n","with open(full_QC_model_path+'labels.txt', newline='') as csvfile:\n"," reader = csv.reader(csvfile)\n"," for row in csv.reader(csvfile):\n"," list_of_labels_QC.append(row[0])\n","\n","#Here we create a list of color for later display\n","color_list = []\n","for i in range(len(list_of_labels_QC)):\n"," color = list(np.random.choice(range(256), size=3))\n"," color_list.append(color)\n","\n","#Save the list of labels as text file \n","if not (Use_the_current_trained_model):\n"," with open('/content/labels.txt', 'w') as f:\n"," for item in list_of_labels_QC:\n"," print(item, file=f)\n","\n"," # Here we split the data between training and validation\n"," # Here we count the number of files in the training target folder\n"," Filelist = os.listdir(Training_target)\n"," number_files = len(Filelist)\n"," percentage_validation= 10\n","\n"," File_for_validation = int((number_files)/percentage_validation)+1\n","\n"," #Here we split the training dataset between training and validation\n"," # Everything is copied in the /Content Folder\n","\n"," Training_source_temp = \"/content/training_source\"\n","\n"," if os.path.exists(Training_source_temp):\n"," shutil.rmtree(Training_source_temp)\n"," os.makedirs(Training_source_temp)\n","\n"," Training_target_temp = \"/content/training_target\"\n"," if os.path.exists(Training_target_temp):\n"," shutil.rmtree(Training_target_temp)\n"," os.makedirs(Training_target_temp)\n","\n"," Validation_source_temp = \"/content/validation_source\"\n","\n"," if os.path.exists(Validation_source_temp):\n"," shutil.rmtree(Validation_source_temp)\n"," os.makedirs(Validation_source_temp)\n","\n"," Validation_target_temp = \"/content/validation_target\"\n"," if os.path.exists(Validation_target_temp):\n"," shutil.rmtree(Validation_target_temp)\n"," os.makedirs(Validation_target_temp)\n","\n"," list_source = os.listdir(os.path.join(Training_source))\n"," 
list_target = os.listdir(os.path.join(Training_target))\n","\n","#Move files into the temporary source and target directories:\n"," \n"," for f in os.listdir(os.path.join(Training_source)):\n"," shutil.copy(Training_source+\"/\"+f, Training_source_temp+\"/\"+f)\n","\n"," for p in os.listdir(os.path.join(Training_target)):\n"," shutil.copy(Training_target+\"/\"+p, Training_target_temp+\"/\"+p)\n","\n"," list_source_temp = os.listdir(os.path.join(Training_source_temp))\n"," list_target_temp = os.listdir(os.path.join(Training_target_temp))\n","\n","\n","#Here we move images to be used for validation\n"," for i in range(File_for_validation):\n","\n"," name = list_source_temp[i]\n"," shutil.move(Training_source_temp+\"/\"+name, Validation_source_temp+\"/\"+name)\n","\n"," shortname_no_extension = name[:-4]\n","\n"," shutil.move(Training_target_temp+\"/\"+shortname_no_extension+\".xml\", Validation_target_temp+\"/\"+shortname_no_extension+\".xml\")\n","\n","\n","#First we need to create list of labels to generate the json dictionaries\n","\n"," list_source_training_temp = os.listdir(os.path.join(Training_source_temp))\n"," list_source_validation_temp = os.listdir(os.path.join(Validation_source_temp))\n","\n"," name_no_extension_training = []\n"," for n in list_source_training_temp:\n"," name_no_extension_training.append(os.path.splitext(n)[0])\n","\n"," name_no_extension_validation = []\n"," for n in list_source_validation_temp:\n"," name_no_extension_validation.append(os.path.splitext(n)[0])\n","\n","#Save the list of labels as text file\n","\n"," with open('/content/training_files.txt', 'w') as f:\n"," for item in name_no_extension_training:\n"," print(item, end='\\n', file=f)\n","\n"," with open('/content/validation_files.txt', 'w') as f:\n"," for item in name_no_extension_validation:\n"," print(item, end='\\n', file=f)\n","\n"," file_output_training = Training_target_temp+\"/output.json\"\n"," file_output_validation = Validation_target_temp+\"/output.json\"\n","\n"," os.chdir(\"/content\")\n"," !python voc2coco.py --ann_dir \"$Training_target_temp\" --output \"$file_output_training\" --ann_ids \"/content/training_files.txt\" --labels \"/content/labels.txt\" --ext xml\n"," !python voc2coco.py --ann_dir \"$Validation_target_temp\" --output \"$file_output_validation\" --ann_ids \"/content/validation_files.txt\" --labels \"/content/labels.txt\" --ext xml\n","\n"," os.chdir(\"/\")\n","\n","#Here we load the dataset to detectron2\n"," if cell_ran_QC_training_dataset == 0:\n"," from detectron2.data.datasets import register_coco_instances\n"," register_coco_instances(\"my_dataset_train\", {}, Training_target_temp+\"/output.json\", Training_source_temp)\n"," register_coco_instances(\"my_dataset_val\", {}, Validation_target_temp+\"/output.json\", Validation_source_temp)\n"," \n","#Failsafe for later\n","cell_ran_QC_training_dataset = 1"]},{"cell_type":"markdown","metadata":{"id":"WZDvRjLZu-Lm"},"source":["## **5.1. Inspection of the loss function**\n","---\n","\n","It is good practice to evaluate the training progress by studying if your model is slowly improving over time. 
The following cell will allow you to load Tensorboard and investigate how several metric evolved over time (iterations).\n","\n"," "]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"cap-cHIfNZnm"},"outputs":[],"source":["#@markdown ##Play the cell to load tensorboard\n","%load_ext tensorboard\n","%tensorboard --logdir \"$full_QC_model_path\""]},{"cell_type":"markdown","metadata":{"id":"lreUY7-SsGkI"},"source":["## **5.2. Error mapping and quality metrics estimation**\n","---\n","\n","This section will compare the predictions generated by your model against ground-truth. Additionally, the below cell will show the mAP value of the model on the QC data If you want to read in more detail about this score, we recommend [this brief explanation](https://medium.com/@jonathan_hui/map-mean-average-precision-for-object-detection-45c121a31173).\n","\n"," The images provided in the \"Source_QC_folder\" and \"Target_QC_folder\" should contain images (e.g. as .png) and annotations (.xml files)!\n","\n","\n","**mAP score:** This refers to the mean average precision of the model on the given dataset. This value gives an indication how precise the predictions of the classes on this dataset are when compared to the ground-truth. Values closer to 1 indicate a good fit.\n"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"kjbHJHbtsg2R"},"outputs":[],"source":["#@markdown ##Choose the folders that contain your Quality Control dataset\n","\n","Source_QC_folder = \"\" #@param{type:\"string\"}\n","Target_QC_folder = \"\" #@param{type:\"string\"}\n","\n","if cell_ran_QC_QC_dataset == 0:\n","#Save the list of labels as text file \n"," with open('/content/labels_QC.txt', 'w') as f:\n"," for item in list_of_labels_QC:\n"," print(item, file=f)\n","\n","#Here we create temp folder for the QC\n","\n"," QC_source_temp = \"/content/QC_source\"\n","\n"," if os.path.exists(QC_source_temp):\n"," shutil.rmtree(QC_source_temp)\n"," os.makedirs(QC_source_temp)\n","\n"," QC_target_temp = \"/content/QC_target\"\n"," if os.path.exists(QC_target_temp):\n"," shutil.rmtree(QC_target_temp)\n"," os.makedirs(QC_target_temp)\n","\n","# Create a quality control/Prediction Folder\n"," if os.path.exists(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\"):\n"," shutil.rmtree(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n","\n"," os.makedirs(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n","\n","#Here we move the QC files to the temp\n","\n"," for f in os.listdir(os.path.join(Source_QC_folder)):\n"," shutil.copy(Source_QC_folder+\"/\"+f, QC_source_temp+\"/\"+f)\n","\n"," for p in os.listdir(os.path.join(Target_QC_folder)):\n"," shutil.copy(Target_QC_folder+\"/\"+p, QC_target_temp+\"/\"+p)\n","\n","#Here we convert the XML files into JSON\n","#Save the list of files\n","\n"," list_source_QC_temp = os.listdir(os.path.join(QC_source_temp))\n","\n"," name_no_extension_QC = []\n"," for n in list_source_QC_temp:\n"," name_no_extension_QC.append(os.path.splitext(n)[0])\n","\n"," with open('/content/QC_files.txt', 'w') as f:\n"," for item in name_no_extension_QC:\n"," print(item, end='\\n', file=f)\n","\n","#Convert XML into JSON\n"," file_output_QC = QC_target_temp+\"/output.json\"\n","\n"," os.chdir(\"/content\")\n"," !python voc2coco.py --ann_dir \"$QC_target_temp\" --output \"$file_output_QC\" --ann_ids \"/content/QC_files.txt\" --labels \"/content/labels.txt\" --ext xml\n","\n"," os.chdir(\"/\")\n","\n","\n","#Here we register 
the QC dataset\n"," register_coco_instances(\"my_dataset_QC\", {}, QC_target_temp+\"/output.json\", QC_source_temp)\n"," cell_ran_QC_QC_dataset = 1\n","\n","\n","#Load the model to use\n","cfg = get_cfg()\n","cfg.merge_from_file(full_QC_model_path+\"config.yaml\")\n","cfg.MODEL.WEIGHTS = os.path.join(full_QC_model_path, \"model_final.pth\")\n","cfg.DATASETS.TEST = (\"my_dataset_QC\", )\n","cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5\n","\n","#Metadata\n","test_metadata = MetadataCatalog.get(\"my_dataset_QC\")\n","test_metadata.set(thing_color = color_list)\n","\n","# For the evaluation we need to load the trainer\n","trainer = CocoTrainer(cfg)\n","trainer.resume_or_load(resume=True)\n","\n","# Here we need to load the predictor\n","\n","predictor = DefaultPredictor(cfg)\n","evaluator = COCOEvaluator(\"my_dataset_QC\", cfg, False, output_dir=QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n","val_loader = build_detection_test_loader(cfg, \"my_dataset_QC\")\n","inference_on_dataset(trainer.model, val_loader, evaluator)\n","\n","\n","print(\"A prediction is displayed\")\n","\n","dataset_QC_dicts = DatasetCatalog.get(\"my_dataset_QC\")\n","\n","for d in random.sample(dataset_QC_dicts, 1):\n"," print(\"Ground Truth\")\n"," img = cv2.imread(d[\"file_name\"])\n"," visualizer = Visualizer(img[:, :, ::-1], metadata=test_metadata, instance_mode=ColorMode.SEGMENTATION, scale=0.5)\n"," vis = visualizer.draw_dataset_dict(d)\n"," cv2_imshow(vis.get_image()[:, :, ::-1])\n","\n"," print(\"A prediction is displayed\")\n"," im = cv2.imread(d[\"file_name\"])\n"," outputs = predictor(im)\n"," v = Visualizer(im[:, :, ::-1],\n"," metadata=test_metadata,\n"," instance_mode=ColorMode.SEGMENTATION, \n"," scale=0.5\n"," )\n"," out = v.draw_instance_predictions(outputs[\"instances\"].to(\"cpu\"))\n"," cv2_imshow(out.get_image()[:, :, ::-1])\n","\n","cell_ran_QC_QC_dataset = 1"]},{"cell_type":"markdown","metadata":{"id":"DWAhOBc7gpzN"},"source":["# **6. Using the trained model**\n","\n","---\n","\n","In this section the unseen data is processed using the trained model (in section 4). First, your unseen images are uploaded and prepared for prediction. After that your trained model from section 4 is activated and finally saved into your Google Drive."]},{"cell_type":"markdown","metadata":{"id":"KAILvLGFS2-1"},"source":["## **6.1. Generate prediction(s) from unseen dataset**\n","---\n","\n","The current trained model (from section 4.2) can now be used to process images. If an older model needs to be used, please untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. 
Predicted output images are saved in your **Result_folder** folder as restored image stacks (ImageJ-compatible TIFF images).\n","\n","**`Data_folder`:** This folder should contains the images that you want to predict using the network that you will train.\n","\n","**`Result_folder`:** This folder will contain the predicted output images."]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"lp-cx8TDIGI-"},"outputs":[],"source":["\n","#@markdown ### Provide the path to your dataset and to the folder where the prediction will be saved, then play the cell to predict output on your unseen images.\n","\n","#@markdown ###Path to data to analyse and where predicted output should be saved:\n","Data_folder = \"\" #@param {type:\"string\"}\n","Result_folder = \"\" #@param {type:\"string\"}\n","\n","# model name and path\n","#@markdown ###Do you want to use the current trained model?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder as well as the location of your training dataset:\n","\n","#@markdown ####Path to trained model to be assessed: \n","\n","Prediction_model_folder = \"\" #@param {type:\"string\"}\n","\n","\n","#Here we find the loaded model name and parent path\n","Prediction_model_name = os.path.basename(Prediction_model_folder)\n","Prediction_model_path = os.path.dirname(Prediction_model_folder)\n","\n","if (Use_the_current_trained_model): \n"," print(\"Using current trained network\")\n"," Prediction_model_name = model_name\n"," Prediction_model_path = model_path\n","\n","full_Prediction_model_path = Prediction_model_path+'/'+Prediction_model_name+'/'\n","\n","\n","if os.path.exists(full_Prediction_model_path):\n"," print(\"The \"+Prediction_model_name+\" network will be used.\")\n","else:\n"," print(bcolors.WARNING +'!! WARNING: The chosen model does not exist !!')\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n","\n","#Here we will load the label file\n","\n","list_of_labels_predictions =[]\n","with open(full_Prediction_model_path+'labels.txt', newline='') as csvfile:\n"," reader = csv.reader(csvfile)\n"," for row in csv.reader(csvfile):\n"," list_of_labels_predictions.append(row[0])\n","\n","#Here we create a list of color\n","color_list = []\n","for i in range(len(list_of_labels_predictions)):\n"," color = list(np.random.choice(range(256), size=3))\n"," color_list.append(color)\n","\n","#Activate the pretrained model. 
\n","# Create config\n","cfg = get_cfg()\n","cfg.merge_from_file(full_Prediction_model_path+\"config.yaml\")\n","cfg.MODEL.WEIGHTS = os.path.join(full_Prediction_model_path, \"model_final.pth\")\n","cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model\n","\n","# Create predictor\n","predictor = DefaultPredictor(cfg)\n","\n","#Load the metadata from the prediction file\n","prediction_metadata = Metadata()\n","prediction_metadata.set(thing_classes = list_of_labels_predictions)\n","prediction_metadata.set(thing_color = color_list)\n","\n","start = datetime.now()\n","\n","validation_folder = Path(Data_folder)\n","\n","for i, file in enumerate(validation_folder.glob(\"*.png\")):\n"," # this loop opens the .png files from the val-folder, creates a dict with the file\n"," # information, plots visualizations and saves the result as .pkl files.\n"," file = str(file)\n"," file_name = file.split(\"/\")[-1]\n"," im = cv2.imread(file)\n","\n"," #Prediction are done here\n"," outputs = predictor(im)\n","\n"," #here we extract the results into numpy arrays\n","\n"," Classes_predictions = outputs[\"instances\"].pred_classes.cpu().data.numpy()\n","\n"," boxes_predictions = outputs[\"instances\"].pred_boxes.tensor.cpu().numpy()\n"," Score_predictions = outputs[\"instances\"].scores.cpu().data.numpy()\n"," \n"," #here we save the results into a csv file\n"," prediction_csv = Result_folder+\"/\"+file_name+\"_predictions.csv\"\n","\n"," with open(prediction_csv, 'w') as f:\n"," writer = csv.writer(f)\n"," writer.writerow(['x1','y1','x2','y2','box width','box height', 'class', 'score' ]) \n","\n"," for i in range(len(boxes_predictions)):\n","\n"," x1 = boxes_predictions[i][0]\n"," y1 = boxes_predictions[i][1]\n"," x2 = boxes_predictions[i][2]\n"," y2 = boxes_predictions[i][3]\n"," box_width = x2 - x1\n"," box_height = y2 -y1\n","\n"," writer.writerow([str(x1), str(y1), str(x2), str(y2), str(box_width), str(box_height), str(list_of_labels_predictions[Classes_predictions[i]]), Score_predictions[i]])\n","\n","\n","# The last example is displayed \n","v = Visualizer(im, metadata=prediction_metadata, instance_mode=ColorMode.SEGMENTATION, scale=1)\n","v = v.draw_instance_predictions(outputs[\"instances\"].to(\"cpu\")) \n","plt.figure(figsize=(20,20))\n","plt.imshow(v.get_image()[:, :, ::-1])\n","plt.axis('off');\n","plt.savefig(Result_folder+\"/\"+file_name)\n"," \n","print(\"Time needed for inferencing:\", datetime.now() - start)\n","\n"]},{"cell_type":"markdown","metadata":{"id":"wgO7Ok1PBFQj"},"source":["## **6.2. Download your predictions**\n","---\n","\n","**Store your data** and ALL its results elsewhere by downloading it from Google Drive and after that clean the original folder tree (datasets, results, trained model etc.) if you plan to train or use new networks. Please note that the notebook will otherwise **OVERWRITE** all files which have the same name."]},{"cell_type":"markdown","metadata":{"id":"ir5oDtGF-34t"},"source":["# **7. 
Version log**\n","---\n","**v1.13**: \n","\n","* The section 1 and 2 are now swapped for better export of *requirements.txt*.\n","\n","* This version also now includes built-in version check and the version log that you're reading now."]},{"cell_type":"markdown","metadata":{"id":"nlyPYwZu4VVS"},"source":["#**Thank you for using Detectron2 2D!**"]}],"metadata":{"accelerator":"GPU","colab":{"collapsed_sections":[],"name":"Detectron2_2D_ZeroCostDL4Mic.ipynb","provenance":[{"file_id":"1hzAI0joLETcG5sI2Qvo8AKDr0TWRKySJ","timestamp":1587653755731},{"file_id":"1QFcz4NnQv4rMwDNl7AzHajN-Ola9sUFW","timestamp":1586411847878},{"file_id":"12UDRQ7abcnXcf5FctR9IUStgCpBiQWn7","timestamp":1584466922281},{"file_id":"1zXCn3A39GI1MCnXK_g_Z-AWh9vkB0YhU","timestamp":1583244415636}]},"kernelspec":{"display_name":"Python 3","language":"python","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.6.9"}},"nbformat":4,"nbformat_minor":0} +{"cells":[{"cell_type":"markdown","metadata":{"id":"JhcOyBQjR54F"},"source":["#**This notebook is in beta**\n","Expect some instabilities and bugs.\n","\n","**Currently missing features include:**\n","\n","- Augmentation cannot be disabled\n","- Exported results include only a simple CSV file. More options will be included in the next releases\n","- Training and QC reports are not generated\n"]},{"cell_type":"markdown","metadata":{"id":"IkSguVy8Xv83"},"source":["# **Detectron2 (2D)**\n","\n"," Detectron2 is a deep-learning method designed to perform object detection and classification of objects in images. Detectron2 is Facebook AI Research's next generation software system that implements state-of-the-art object detection algorithms. It is a ground-up rewrite of the previous version, Detectron, and it originates from maskrcnn-benchmark. More information on Detectron2 can be found on the Detectron2 github pages (https://github.com/facebookresearch/detectron2).\n","\n","\n","\n","**This particular notebook enables object detection and classification on 2D images given ground truth bounding boxes. If you are interested in image segmentation, you should use our U-net or Stardist notebooks instead.**\n","\n","---\n","\n","*Disclaimer*:\n","\n","This notebook is part of the Zero-Cost Deep-Learning to Enhance Microscopy project (https://github.com/HenriquesLab/DeepLearning_Collab/wiki). Jointly developed by the Jacquemet (link to https://cellmig.org/) and Henriques (https://henriqueslab.github.io/) laboratories.\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"NDICs5NxYEWP"},"source":["# **License**\n","\n","---"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"R575GX8cX2aP"},"outputs":[],"source":["#@markdown ##Double click to see the license information\n","\n","#------------------------- LICENSE FOR ZeroCostDL4Mic------------------------------------\n","#This ZeroCostDL4Mic notebook is distributed under the MIT licence\n","\n","\n","\n","#------------------------- LICENSE FOR CycleGAN ------------------------------------\n","\n","\n","#Apache License\n","#Version 2.0, January 2004\n","#http://www.apache.org/licenses/\n","\n","#TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n","\n","#1. 
Definitions.\n","\n","#\"License\" shall mean the terms and conditions for use, reproduction,\n","#and distribution as defined by Sections 1 through 9 of this document.\n","\n","#\"Licensor\" shall mean the copyright owner or entity authorized by\n","#the copyright owner that is granting the License.\n","\n","#\"Legal Entity\" shall mean the union of the acting entity and all\n","#other entities that control, are controlled by, or are under common\n","#control with that entity. For the purposes of this definition,\n","#\"control\" means (i) the power, direct or indirect, to cause the\n","#direction or management of such entity, whether by contract or\n","#otherwise, or (ii) ownership of fifty percent (50%) or more of the\n","#outstanding shares, or (iii) beneficial ownership of such entity.\n","\n","#\"You\" (or \"Your\") shall mean an individual or Legal Entity\n","#exercising permissions granted by this License.\n","\n","#\"Source\" form shall mean the preferred form for making modifications,\n","#including but not limited to software source code, documentation\n","#source, and configuration files.\n","\n","#\"Object\" form shall mean any form resulting from mechanical\n","#transformation or translation of a Source form, including but\n","#not limited to compiled object code, generated documentation,\n","#and conversions to other media types.\n","\n","#\"Work\" shall mean the work of authorship, whether in Source or\n","#Object form, made available under the License, as indicated by a\n","#copyright notice that is included in or attached to the work\n","#(an example is provided in the Appendix below).\n","\n","#\"Derivative Works\" shall mean any work, whether in Source or Object\n","#form, that is based on (or derived from) the Work and for which the\n","#editorial revisions, annotations, elaborations, or other modifications\n","#represent, as a whole, an original work of authorship. For the purposes\n","#of this License, Derivative Works shall not include works that remain\n","#separable from, or merely link (or bind by name) to the interfaces of,\n","#the Work and Derivative Works thereof.\n","\n","#\"Contribution\" shall mean any work of authorship, including\n","#the original version of the Work and any modifications or additions\n","#to that Work or Derivative Works thereof, that is intentionally\n","#submitted to Licensor for inclusion in the Work by the copyright owner\n","#or by an individual or Legal Entity authorized to submit on behalf of\n","#the copyright owner. For the purposes of this definition, \"submitted\"\n","#means any form of electronic, verbal, or written communication sent\n","#to the Licensor or its representatives, including but not limited to\n","#communication on electronic mailing lists, source code control systems,\n","#and issue tracking systems that are managed by, or on behalf of, the\n","#Licensor for the purpose of discussing and improving the Work, but\n","#excluding communication that is conspicuously marked or otherwise\n","#designated in writing by the copyright owner as \"Not a Contribution.\"\n","\n","#\"Contributor\" shall mean Licensor and any individual or Legal Entity\n","#on behalf of whom a Contribution has been received by Licensor and\n","#subsequently incorporated within the Work.\n","\n","#2. Grant of Copyright License. 
Subject to the terms and conditions of\n","#this License, each Contributor hereby grants to You a perpetual,\n","#worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n","#copyright license to reproduce, prepare Derivative Works of,\n","#publicly display, publicly perform, sublicense, and distribute the\n","#Work and such Derivative Works in Source or Object form.\n","\n","#3. Grant of Patent License. Subject to the terms and conditions of\n","#this License, each Contributor hereby grants to You a perpetual,\n","#worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n","#(except as stated in this section) patent license to make, have made,\n","#use, offer to sell, sell, import, and otherwise transfer the Work,\n","#where such license applies only to those patent claims licensable\n","#by such Contributor that are necessarily infringed by their\n","#Contribution(s) alone or by combination of their Contribution(s)\n","#with the Work to which such Contribution(s) was submitted. If You\n","#institute patent litigation against any entity (including a\n","#cross-claim or counterclaim in a lawsuit) alleging that the Work\n","#or a Contribution incorporated within the Work constitutes direct\n","#or contributory patent infringement, then any patent licenses\n","#granted to You under this License for that Work shall terminate\n","#as of the date such litigation is filed.\n","\n","#4. Redistribution. You may reproduce and distribute copies of the\n","#Work or Derivative Works thereof in any medium, with or without\n","#modifications, and in Source or Object form, provided that You\n","#meet the following conditions:\n","\n","#(a) You must give any other recipients of the Work or\n","#Derivative Works a copy of this License; and\n","\n","#(b) You must cause any modified files to carry prominent notices\n","#stating that You changed the files; and\n","\n","#(c) You must retain, in the Source form of any Derivative Works\n","#that You distribute, all copyright, patent, trademark, and\n","#attribution notices from the Source form of the Work,\n","#excluding those notices that do not pertain to any part of\n","#the Derivative Works; and\n","\n","#(d) If the Work includes a \"NOTICE\" text file as part of its\n","#distribution, then any Derivative Works that You distribute must\n","#include a readable copy of the attribution notices contained\n","#within such NOTICE file, excluding those notices that do not\n","#pertain to any part of the Derivative Works, in at least one\n","#of the following places: within a NOTICE text file distributed\n","#as part of the Derivative Works; within the Source form or\n","#documentation, if provided along with the Derivative Works; or,\n","#within a display generated by the Derivative Works, if and\n","#wherever such third-party notices normally appear. The contents\n","#of the NOTICE file are for informational purposes only and\n","#do not modify the License. 
You may add Your own attribution\n","#notices within Derivative Works that You distribute, alongside\n","#or as an addendum to the NOTICE text from the Work, provided\n","#that such additional attribution notices cannot be construed\n","#as modifying the License.\n","\n","#You may add Your own copyright statement to Your modifications and\n","#may provide additional or different license terms and conditions\n","#for use, reproduction, or distribution of Your modifications, or\n","#for any such Derivative Works as a whole, provided Your use,\n","#reproduction, and distribution of the Work otherwise complies with\n","#the conditions stated in this License.\n","\n","#5. Submission of Contributions. Unless You explicitly state otherwise,\n","#any Contribution intentionally submitted for inclusion in the Work\n","#by You to the Licensor shall be under the terms and conditions of\n","#this License, without any additional terms or conditions.\n","#Notwithstanding the above, nothing herein shall supersede or modify\n","#the terms of any separate license agreement you may have executed\n","#with Licensor regarding such Contributions.\n","\n","#6. Trademarks. This License does not grant permission to use the trade\n","#names, trademarks, service marks, or product names of the Licensor,\n","#except as required for reasonable and customary use in describing the\n","#origin of the Work and reproducing the content of the NOTICE file.\n","\n","#7. Disclaimer of Warranty. Unless required by applicable law or\n","#agreed to in writing, Licensor provides the Work (and each\n","#Contributor provides its Contributions) on an \"AS IS\" BASIS,\n","#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n","#implied, including, without limitation, any warranties or conditions\n","#of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n","#PARTICULAR PURPOSE. You are solely responsible for determining the\n","#appropriateness of using or redistributing the Work and assume any\n","#risks associated with Your exercise of permissions under this License.\n","\n","#8. Limitation of Liability. In no event and under no legal theory,\n","#whether in tort (including negligence), contract, or otherwise,\n","#unless required by applicable law (such as deliberate and grossly\n","#negligent acts) or agreed to in writing, shall any Contributor be\n","#liable to You for damages, including any direct, indirect, special,\n","#incidental, or consequential damages of any character arising as a\n","#result of this License or out of the use or inability to use the\n","#Work (including but not limited to damages for loss of goodwill,\n","#work stoppage, computer failure or malfunction, or any and all\n","#other commercial damages or losses), even if such Contributor\n","#has been advised of the possibility of such damages.\n","\n","#9. Accepting Warranty or Additional Liability. While redistributing\n","#the Work or Derivative Works thereof, You may choose to offer,\n","#and charge a fee for, acceptance of support, warranty, indemnity,\n","#or other liability obligations and/or rights consistent with this\n","#License. 
However, in accepting such obligations, You may act only\n","#on Your own behalf and on Your sole responsibility, not on behalf\n","#of any other Contributor, and only if You agree to indemnify,\n","#defend, and hold each Contributor harmless for any liability\n","#incurred by, or claims asserted against, such Contributor by reason\n","#of your accepting any such warranty or additional liability.\n","\n","#END OF TERMS AND CONDITIONS\n","\n","#APPENDIX: How to apply the Apache License to your work.\n","\n","#To apply the Apache License to your work, attach the following\n","#boilerplate notice, with the fields enclosed by brackets \"[]\"\n","#replaced with your own identifying information. (Don't include\n","#the brackets!) The text should be enclosed in the appropriate\n","#comment syntax for the file format. We also recommend that a\n","#file or class name and description of purpose be included on the\n","#same \"printed page\" as the copyright notice for easier\n","#identification within third-party archives.\n","\n","#Copyright [yyyy] [name of copyright owner]\n","\n","\n","#Licensed under the Apache License, Version 2.0 (the \"License\");\n","#you may not use this file except in compliance with the License.\n","#You may obtain a copy of the License at\n","\n","#http://www.apache.org/licenses/LICENSE-2.0\n","\n","#Unless required by applicable law or agreed to in writing, software\n","#distributed under the License is distributed on an \"AS IS\" BASIS,\n","#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n","#See the License for the specific language governing permissions and\n","#limitations under the License."]},{"cell_type":"markdown","metadata":{"id":"jWAz2i7RdxUV"},"source":["# **How to use this notebook?**\n","\n","---\n","\n","Video describing how to use our notebooks are available on youtube:\n"," - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run through of the workflow to obtain the notebooks and the provided test datasets as well as a common use of the notebook\n"," - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook\n","\n","\n","---\n","###**Structure of a notebook**\n","\n","The notebook contains two types of cell: \n","\n","**Text cells** provide information and can be modified by douple-clicking the cell. You are currently reading the text cell. You can create a new text by clicking `+ Text`.\n","\n","**Code cells** contain code and the code can be modfied by selecting the cell. To execute the cell, move your cursor on the `[ ]`-mark on the left side of the cell (play button appears). Click to execute the cell. After execution is done the animation of play button stops. You can create a new coding cell by clicking `+ Code`.\n","\n","---\n","###**Table of contents, Code snippets** and **Files**\n","\n","On the top left side of the notebook you find three tabs which contain from top to bottom:\n","\n","*Table of contents* = contains structure of the notebook. Click the content to move quickly between sections.\n","\n","*Code snippets* = contain examples how to code certain tasks. You can ignore this when using this notebook.\n","\n","*Files* = contain all available files. After mounting your google drive (see section 1.) you will find your files and folders here. \n","\n","**Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. 
You do not need to use the Mount Drive-button; your Google Drive is connected in section 2.2.\n","\n","**Note:** The \"sample data\" in \"Files\" contains default files. Do not upload anything in here!\n","\n","---\n","###**Making changes to the notebook**\n","\n","**You can make a copy** of the notebook and save it to your Google Drive. To do this click file -> save a copy in drive.\n","\n","To **edit a cell**, double click on the text. This will show you either the source code (in code cells) or the source text (in text cells).\n","You can use the `#`-mark in code cells to comment out parts of the code. This allows you to keep the original code piece in the cell as a comment."]},{"cell_type":"markdown","metadata":{"id":"gKDLkLWUd-YX"},"source":["#**0. Before getting started**\n","---\n"," Preparing the dataset carefully is essential to make this Detectron2 notebook work. This model requires a set of images as input and a list of annotation files in Pascal VOC format as target. The annotation files must have exactly the same name as the corresponding input files, except with an .xml instead of the .png extension. The annotation files contain the class labels and all bounding boxes for the objects in each image of your dataset. Most datasets offer the option of saving the annotations in this format, and hand-annotation software will typically save the annotations in this format automatically. \n","\n"," If you want to assemble your own dataset, we recommend using the open source https://www.makesense.ai/ resource. You can follow our instructions on how to label your dataset with this tool on our [wiki](https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki/Object-Detection-(YOLOv2)).\n","\n","**We strongly recommend that you generate extra paired images. These images can be used to assess the quality of your trained model (Quality control dataset)**. The quality control assessment can be done directly in this notebook.\n","\n"," **Additionally, the corresponding input and output files need to have the same name**.\n","\n"," Please note that you currently can **only use .png files!**\n","\n","\n","Here's a common data structure that can work:\n","* Experiment A\n"," - **Training dataset**\n"," - Input images (Training_source)\n"," - img_1.png, img_2.png, ...\n"," - Annotation files (Training_target)\n"," - img_1.xml, img_2.xml, ...\n"," - **Quality control dataset**\n"," - Input images\n"," - img_1.png, img_2.png\n"," - Annotation files\n"," - img_1.xml, img_2.xml\n"," - **Data to be predicted**\n"," - **Results**\n","\n","---\n","**Important note**\n","\n","- If you wish to **Train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.\n","\n","- If you wish to **Evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.\n","\n","- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.\n","---"]},{"cell_type":"markdown","metadata":{"id":"n4yWFoJNnoin"},"source":["# **1. 
Install Detectron2 and dependencies**\n","---"]},{"cell_type":"markdown","metadata":{"id":"yg1vZe88JEyk"},"source":["## **1.1. Install key dependencies**\n","---\n"," "]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"Tw1Usk1iPvRQ"},"outputs":[],"source":[" #@markdown ##Install dependencies and Detectron2\n","\n","from builtins import any as b_any\n","\n","def get_requirements_path():\n"," # Store requirements file in 'base_path' directory\n"," current_dir = os.getcwd()\n"," dir_count = current_dir.count('/') - 1\n"," path = '../' * (dir_count) + 'requirements.txt'\n"," return path\n","\n","def filter_files(file_list, filter_list):\n"," filtered_list = []\n"," for fname in file_list:\n"," if b_any(fname.split('==')[0] in s for s in filter_list):\n"," filtered_list.append(fname)\n"," return filtered_list\n","\n","def build_requirements_file(before, after):\n"," path = get_requirements_path()\n","\n"," # Exporting requirements.txt for local run\n"," !pip freeze > $path\n","\n"," # Get minimum requirements file\n"," df = pd.read_csv(path)\n"," mod_list = [m.split('.')[0] for m in after if not m in before]\n"," req_list_temp = df.values.tolist()\n"," req_list = [x[0] for x in req_list_temp]\n","\n"," # Replace with package name and handle cases where import name is different to module name\n"," mod_name_list = [['sklearn', 'scikit-learn'], ['skimage', 'scikit-image']]\n"," mod_replace_list = [[x[1] for x in mod_name_list] if s in [x[0] for x in mod_name_list] else s for s in mod_list]\n"," filtered_list = filter_files(req_list, mod_replace_list)\n","\n"," file=open(path,'w')\n"," for item in filtered_list:\n"," file.writelines(item)\n","\n"," file.close()\n","\n","import sys\n","before = [str(m) for m in sys.modules]\n","\n","# install dependencies\n","#!pip install -U torch torchvision cython\n","!pip install -U 'git+https://github.com/facebookresearch/fvcore.git' 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'\n","import torch, torchvision\n","import os\n","import pandas as pd\n","torch.__version__\n","\n","#Create a variable to get and store relative base path\n","base_path = os.getcwd()\n","\n","!git clone https://github.com/facebookresearch/detectron2 detectron2_repo\n","!pip install -e detectron2_repo\n","\n","!pip install wget\n","\n","#Force session restart\n","exit(0)\n","\n","# Build requirements file for local run\n","after = [str(m) for m in sys.modules]\n","build_requirements_file(before, after)"]},{"cell_type":"markdown","metadata":{"id":"xhWNIu6cf5G8"},"source":["## **1.2. Restart your runtime**\n","---\n","\n","\n","\n","** Ignore the following message error message. Your Runtime has automatically restarted. This is normal.**\n","\n","\"\"
\n"]},{"cell_type":"markdown","metadata":{"id":"5nXTBntzKRWu"},"source":["## **1.3. Load key dependencies**\n","---\n"," "]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"fq21zJVFNASx"},"outputs":[],"source":["Notebook_version = '1.13'\n","Network = 'Detectron 2D'\n","\n","\n","#@markdown ##Play this cell to load the required dependencies\n","import wget\n","# Some basic setup: \n","import detectron2\n","from detectron2.utils.logger import setup_logger\n","setup_logger()\n"," \n","# import some common libraries\n","import numpy as np\n","import os, json, cv2, random\n","from google.colab.patches import cv2_imshow\n"," \n","import yaml\n"," \n","#Download the script to convert XML into COCO\n"," \n","wget.download(\"https://github.com/HenriquesLab/ZeroCostDL4Mic/raw/master/Tools/voc2coco.py\", base_path + \"\")\n"," \n"," \n","# import some common detectron2 utilities\n","from detectron2 import model_zoo\n","from detectron2.engine import DefaultPredictor\n","from detectron2.config import get_cfg\n","from detectron2.utils.visualizer import Visualizer\n","from detectron2.data import MetadataCatalog, DatasetCatalog\n","from detectron2.utils.visualizer import ColorMode\n","\n","from detectron2.data import DatasetCatalog, MetadataCatalog, build_detection_test_loader\n","from datetime import datetime\n","from detectron2.data.catalog import Metadata\n","\n","from detectron2.config import get_cfg\n","from detectron2.data import DatasetCatalog, MetadataCatalog, build_detection_test_loader\n","from detectron2.evaluation import COCOEvaluator, inference_on_dataset\n","from detectron2.engine import DefaultTrainer\n","from detectron2.data.datasets import register_coco_instances\n","from detectron2.utils.visualizer import ColorMode\n","import glob\n","from detectron2.checkpoint import Checkpointer\n","from detectron2.config import get_cfg\n","import os\n"," \n"," \n","# ------- Common variable to all ZeroCostDL4Mic notebooks -------\n","import numpy as np\n","from matplotlib import pyplot as plt\n","import urllib\n","import os, random\n","import shutil \n","import zipfile\n","from tifffile import imread, imsave\n","import time\n","import sys\n"," \n","from pathlib import Path\n","import pandas as pd\n","import csv\n","from glob import glob\n","from scipy import signal\n","from scipy import ndimage\n","from skimage import io\n","from sklearn.linear_model import LinearRegression\n","from skimage.util import img_as_uint\n","import matplotlib as mpl\n","from skimage.metrics import structural_similarity\n","from skimage.metrics import peak_signal_noise_ratio as psnr\n","from astropy.visualization import simple_norm\n","from skimage import img_as_float32\n"," \n","# Colors for the warning messages\n","class bcolors:\n"," WARNING = '\\033[31m'\n","W = '\\033[0m' # white (normal)\n","R = '\\033[31m' # red\n"," \n","#Disable some of the tensorflow warnings\n","import warnings\n","warnings.filterwarnings(\"ignore\")\n"," \n"," \n","from detectron2.engine import DefaultTrainer\n","from detectron2.evaluation import COCOEvaluator\n"," \n","class CocoTrainer(DefaultTrainer):\n"," \n"," @classmethod\n"," def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n"," \n"," if output_folder is None:\n"," os.makedirs(\"coco_eval\", exist_ok=True)\n"," output_folder = \"coco_eval\"\n"," \n"," return COCOEvaluator(dataset_name, cfg, False, output_folder)\n"," \n"," \n"," \n","print(\"Librairies loaded\")\n"," \n","# Check if this is the latest version of the 
notebook\n","All_notebook_versions = pd.read_csv(\"https://raw.githubusercontent.com/HenriquesLab/ZeroCostDL4Mic/master/Colab_notebooks/Latest_Notebook_versions.csv\", dtype=str)\n","print('Notebook version: '+Notebook_version)\n","Latest_Notebook_version = All_notebook_versions[All_notebook_versions[\"Notebook\"] == Network]['Version'].iloc[0]\n","print('Latest notebook version: '+Latest_Notebook_version)\n","if Notebook_version == Latest_Notebook_version:\n"," print(\"This notebook is up-to-date.\")\n","else:\n"," print(bcolors.WARNING +\"A new version of this notebook has been released. We recommend that you download it at https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki\")\n","\n","\n"," \n","#Failsafes\n","cell_ran_prediction = 0\n","cell_ran_training = 0\n","cell_ran_QC_training_dataset = 0\n","cell_ran_QC_QC_dataset = 0"]},{"cell_type":"markdown","metadata":{"id":"cbTknRcviyT7"},"source":["# **2. Initialise the Colab session**\n","\n","\n","\n","\n","---\n","\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"DMNHVZfHmbKb"},"source":["## **2.1. Check for GPU access**\n","---\n","\n","By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:\n","\n","Go to **Runtime -> Change the Runtime type**\n","\n","**Runtime type: Python 3** *(Python 3 is programming language in which this program is written)*\n","\n","**Accelerator: GPU** *(Graphics processing unit)*\n"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"h5i5CS2bSmZr"},"outputs":[],"source":["#@markdown ##Run this cell to check if you have GPU access\n","#%tensorflow_version 1.x\n","\n","\n","import tensorflow as tf\n","if tf.test.gpu_device_name()=='':\n"," print('You do not have GPU access.') \n"," print('Did you change your runtime ?') \n"," print('If the runtime setting is correct then Google did not allocate a GPU for your session')\n"," print('Expect slow performance. To access GPU try reconnecting later')\n","\n","else:\n"," print('You have GPU access')\n"," !nvidia-smi"]},{"cell_type":"markdown","metadata":{"id":"n3B3meGTbYVi"},"source":["## **2.2. Mount your Google Drive**\n","---\n"," To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook.\n","\n"," Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste into the cell and press enter. This will give Colab access to the data on the drive. \n","\n"," Once this is done, your data are available in the **Files** tab on the top left of notebook."]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"01Djr8v-5pPk"},"outputs":[],"source":["#@markdown ##Play the cell to connect your Google Drive to Colab\n"," \n","#@markdown * Click on the URL. \n"," \n","#@markdown * Sign in your Google Account. \n"," \n","#@markdown * Copy the authorization code. \n"," \n","#@markdown * Enter the authorization code. \n"," \n","#@markdown * Click on \"Files\" site on the right. Refresh the site. Your Google Drive folder should now be available here as \"drive\". 
\n"," \n","# mount user's Google Drive to Google Colab.\n","from google.colab import drive\n","drive.mount(base_path + '/gdrive')"]},{"cell_type":"markdown","metadata":{"id":"xm5YEhKq-Hse"},"source":["** If you cannot see your files, reactivate your session by connecting to your hosted runtime.** \n","\n","\n","\"Example
Connect to a hosted runtime.
"]},{"cell_type":"markdown","metadata":{"id":"iwjra6kMKmUA"},"source":["# **3. Select your parameters and paths**\n"]},{"cell_type":"markdown","metadata":{"id":"Kbn9_JdqnNnK"},"source":["## **3.1. Setting main training parameters**\n","---\n"," "]},{"cell_type":"markdown","metadata":{"id":"CB6acvUFtWqd"},"source":[" **Paths for training, predictions and results**\n","\n","**`Training_source:`, `Training_target`:** These are the paths to your folders containing the Training_source and the annotation data respectively. To find the paths of the folders containing the respective datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below.\n","\n","**`labels`:** Input the name of the differentes labels used to annotate your dataset (separated by a comma).\n","\n","**`model_name`:** Use only my_model -style, not my-model (Use \"_\" not \"-\"). Do not use spaces in the name. Avoid using the name of an existing model (saved in the same folder) as it will be overwritten.\n","\n","**`model_path`**: Enter the path where your model will be saved once trained (for instance your result folder).\n","\n","**Training Parameters**\n","\n","**`number_of_iteration`:** Input how many iterations to use to train the network. Initial results can be observed using 1000 iterations but consider using 5000 or more iterations to train your models. **Default value: 2000**\n"," \n","\n","**Advanced Parameters - experienced users only**\n","\n","**`batch_size:`** This parameter defines the number of patches seen in each training step. Noise2Void requires a large batch size for stable training. Reduce this parameter if your GPU runs out of memory. **Default value: 128**\n","\n","**`number_of_steps`:** Define the number of training steps by epoch. By default this parameter is calculated so that each image / patch is seen at least once per epoch. **Default value: Number of patch / batch_size**\n","\n","**`percentage_validation`:** Input the percentage of your training dataset you want to use to validate the network during the training. **Default value: 10**\n","\n","**`initial_learning_rate`:** Input the initial value to be used as learning rate. 
**Default value: 0.0001**\n"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"ewpNJ_I0Mv47"},"outputs":[],"source":["# create DataGenerator-object.\n","\n","\n","#@markdown ###Path to training image(s): \n","Training_source = \"\" #@param {type:\"string\"}\n","Training_target = \"\" #@param {type:\"string\"}\n","\n","#@markdown ###Labels\n","#@markdown Input the name of the differentes labels present in your training dataset separated by a comma\n","labels = \"\" #@param {type:\"string\"}\n","\n","\n","#@markdown ### Model name and path:\n","model_name = \"\" #@param {type:\"string\"}\n","model_path = \"\" #@param {type:\"string\"}\n","\n","full_model_path = model_path+'/'+model_name+'/'\n","\n","\n","#@markdown ###Training Parameters\n","#@markdown Number of iterations:\n","number_of_iteration = 2000#@param {type:\"number\"}\n","\n","\n","#Here we store the informations related to our labels\n","\n","list_of_labels = labels.split(\", \")\n","with open(base_path + '/labels.txt', 'w') as f:\n"," for item in list_of_labels:\n"," print(item, file=f)\n","\n","number_of_labels = len(list_of_labels)\n","\n","\n","#@markdown ###Advanced Parameters\n","\n","Use_Default_Advanced_Parameters = True#@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please input:\n","batch_size = 4#@param {type:\"number\"}\n","percentage_validation = 10#@param {type:\"number\"}\n","initial_learning_rate = 0.001 #@param {type:\"number\"}\n","\n","\n","if (Use_Default_Advanced_Parameters): \n"," print(\"Default advanced parameters enabled\")\n"," batch_size = 4\n"," percentage_validation = 10\n"," initial_learning_rate = 0.001\n","\n","# Here we disable pre-trained model by default (in case the next cell is not ran)\n","Use_pretrained_model = True\n","\n","# Here we disable data augmentation by default (in case the cell is not ran)\n","\n","Use_Data_augmentation = True\n","\n","# Here we split the data between training and validation\n","# Here we count the number of files in the training target folder\n","Filelist = os.listdir(Training_target)\n","number_files = len(Filelist)\n","\n","File_for_validation = int((number_files)/percentage_validation)+1\n","\n","#Here we split the training dataset between training and validation\n","# Everything is copied in the 'base_path' Folder\n","\n","Training_source_temp = base_path + \"/training_source\"\n","\n","if os.path.exists(Training_source_temp):\n"," shutil.rmtree(Training_source_temp)\n","os.makedirs(Training_source_temp)\n","\n","Training_target_temp = base_path + \"/training_target\"\n","if os.path.exists(Training_target_temp):\n"," shutil.rmtree(Training_target_temp)\n","os.makedirs(Training_target_temp)\n","\n","Validation_source_temp = base_path + \"/validation_source\"\n","\n","if os.path.exists(Validation_source_temp):\n"," shutil.rmtree(Validation_source_temp)\n","os.makedirs(Validation_source_temp)\n","\n","Validation_target_temp = base_path + \"/validation_target\"\n","if os.path.exists(Validation_target_temp):\n"," shutil.rmtree(Validation_target_temp)\n","os.makedirs(Validation_target_temp)\n","\n","list_source = os.listdir(os.path.join(Training_source))\n","list_target = os.listdir(os.path.join(Training_target))\n","\n","#Move files into the temporary source and target directories:\n","\n"," \n","for f in os.listdir(os.path.join(Training_source)):\n"," shutil.copy(Training_source+\"/\"+f, Training_source_temp+\"/\"+f)\n","\n","for p in os.listdir(os.path.join(Training_target)):\n"," shutil.copy(Training_target+\"/\"+p, 
Training_target_temp+\"/\"+p)\n","\n","\n","list_source_temp = os.listdir(os.path.join(Training_source_temp))\n","list_target_temp = os.listdir(os.path.join(Training_target_temp))\n","\n","\n","#Here we move images to be used for validation\n","for i in range(File_for_validation):\n","\n"," name = list_source_temp[i]\n"," shutil.move(Training_source_temp+\"/\"+name, Validation_source_temp+\"/\"+name)\n","\n"," shortname_no_extension = name[:-4]\n","\n"," shutil.move(Training_target_temp+\"/\"+shortname_no_extension+\".xml\", Validation_target_temp+\"/\"+shortname_no_extension+\".xml\")\n","\n","# Here we convert the XML files into COCO format to be loaded in detectron2\n","\n","#First we need to create list of labels to generate the json dictionaries\n","\n","list_source_training_temp = os.listdir(os.path.join(Training_source_temp))\n","list_source_validation_temp = os.listdir(os.path.join(Validation_source_temp))\n","\n","\n","name_no_extension_training = []\n","for n in list_source_training_temp:\n"," name_no_extension_training.append(os.path.splitext(n)[0])\n","\n","name_no_extension_validation = []\n","for n in list_source_validation_temp:\n"," name_no_extension_validation.append(os.path.splitext(n)[0])\n","\n","#Save the list of labels as text file\n","\n","with open(base_path + '/training_files.txt', 'w') as f:\n"," for item in name_no_extension_training:\n"," print(item, end='\\n', file=f)\n","\n","with open(base_path + '/validation_files.txt', 'w') as f:\n"," for item in name_no_extension_validation:\n"," print(item, end='\\n', file=f)\n","\n","\n","file_output_training = Training_target_temp+\"/output.json\"\n","file_output_validation = Validation_target_temp+\"/output.json\"\n","\n","\n","os.chdir(base_path + \"\")\n","!python voc2coco.py --ann_dir \"$Training_target_temp\" --output \"$file_output_training\" --ann_ids base_path + \"/training_files.txt\" --labels base_path + \"/labels.txt\" --ext xml\n","!python voc2coco.py --ann_dir \"$Validation_target_temp\" --output \"$file_output_validation\" --ann_ids base_path + \"/validation_files.txt\" --labels base_path + \"/labels.txt\" --ext xml\n","\n","\n","os.chdir(\"/\")\n","\n","#Here we load the dataset to detectron2\n","if cell_ran_training == 0:\n"," from detectron2.data.datasets import register_coco_instances\n"," register_coco_instances(\"my_dataset_train\", {}, Training_target_temp+\"/output.json\", Training_source_temp)\n"," register_coco_instances(\"my_dataset_val\", {}, Validation_target_temp+\"/output.json\", Validation_source_temp)\n","\n","\n","#visualize training data\n","my_dataset_train_metadata = MetadataCatalog.get(\"my_dataset_train\")\n","\n","dataset_dicts = DatasetCatalog.get(\"my_dataset_train\")\n","\n","import random\n","from detectron2.utils.visualizer import Visualizer\n","\n","for d in random.sample(dataset_dicts, 1):\n"," img = cv2.imread(d[\"file_name\"])\n"," visualizer = Visualizer(img[:, :, ::-1], metadata=my_dataset_train_metadata, instance_mode=ColorMode.SEGMENTATION, scale=0.8)\n"," vis = visualizer.draw_dataset_dict(d)\n"," cv2_imshow(vis.get_image()[:, :, ::-1])\n","\n","# failsafe\n","cell_ran_training = 1"]},{"cell_type":"markdown","metadata":{"id":"STDOuNOFsTTJ"},"source":["## **3.2. Data augmentation** \n","---\n",""]},{"cell_type":"markdown","metadata":{"id":"E4QW-tvYsWhX"},"source":["Data augmentation is currently enabled by default in this notebook. 
The option to disable data augmentation is not yet avaialble.\n"," "]},{"cell_type":"markdown","metadata":{"id":"W6pZg0KVnPzf"},"source":["\n","## **3.3. Using weights from a pre-trained model as initial weights**\n","---\n"," Here, you can set the the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a Detectron2 model**. \n","\n"," "]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"l-EDcv3Wyvqb"},"outputs":[],"source":["# @markdown ##Loading weights from a pre-trained network\n","\n","Use_pretrained_model = True #@param {type:\"boolean\"}\n","\n","pretrained_model_choice = \"Faster R-CNN\" #@param [\"Faster R-CNN\",\"RetinaNet\", \"Model_from_file\"]\n","\n","#pretrained_model_choice = \"Faster R-CNN\" #@param [\"Faster R-CNN\", \"RetinaNet\", \"RPN & Fast R-CNN\", \"Model_from_file\"]\n","\n","\n","\n","#@markdown ###If you chose \"Model_from_file\", please provide the path to the model folder:\n","pretrained_model_path = \"\" #@param {type:\"string\"}\n","\n","# --------------------- Check if we load a previously trained model ------------------------\n","if Use_pretrained_model:\n","\n","# --------------------- Load the model from the choosen path ------------------------\n"," if pretrained_model_choice == \"Model_from_file\":\n"," h5_file_path = pretrained_model_path\n"," print('Weights found in:')\n"," print(h5_file_path)\n"," print('will be loaded prior to training.')\n","\n"," if not os.path.exists(h5_file_path) and Use_pretrained_model:\n"," print('WARNING pretrained model does not exist')\n"," h5_file_path = \"COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml\"\n"," print('The Faster R-CNN model will be used.')\n"," \n"," if pretrained_model_choice == \"Faster R-CNN\":\n"," h5_file_path = \"COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml\"\n"," print('The Faster R-CNN model will be used.')\n"," \n"," if pretrained_model_choice == \"RetinaNet\":\n"," h5_file_path = \"COCO-Detection/retinanet_R_101_FPN_3x.yaml\"\n"," print('The RetinaNet model will be used.')\n","\n"," if pretrained_model_choice == \"RPN & Fast R-CNN\":\n"," h5_file_path = \"COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml\"\n","\n","\n","if not Use_pretrained_model:\n"," h5_file_path = \"COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml\"\n"," print('The Faster R-CNN model will be used.')"]},{"cell_type":"markdown","metadata":{"id":"HLYcZR9gMv42"},"source":["#**4. Train the network**\n","---\n"]},{"cell_type":"markdown","metadata":{"id":"DapHLZBVMNBZ"},"source":["\n","## **4.1. Start Trainning**\n","---\n","\n","When playing the cell below you should see updates after each epoch (round). Network training can take some time.\n","\n","* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches. 
Another way circumvent this is to save the parameters of the model after training and start training again from this point.\n"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"Nft44VSLU8ZH"},"outputs":[],"source":["#@markdown ##Start training\n","\n","# Create the model folder\n","\n","if os.path.exists(full_model_path):\n"," shutil.rmtree(full_model_path)\n","os.makedirs(full_model_path)\n","\n","#Copy the label names in the model folder\n","shutil.copy(base_path + \"/labels.txt\", full_model_path+\"/\"+\"labels.txt\")\n","\n","#PDF export\n","#######################################\n","## MISSING \n","#######################################\n","#To be added\n","\n","start = time.time()\n","\n","#Load the config files\n","cfg = get_cfg()\n","\n","if pretrained_model_choice == \"Model_from_file\":\n"," cfg.merge_from_file(pretrained_model_path+\"/config.yaml\")\n","\n","if not pretrained_model_choice == \"Model_from_file\":\n"," cfg.merge_from_file(model_zoo.get_config_file(h5_file_path))\n","\n","cfg.DATASETS.TRAIN = (\"my_dataset_train\",)\n","cfg.DATASETS.TEST = (\"my_dataset_val\",)\n","cfg.OUTPUT_DIR= (full_model_path)\n","cfg.DATALOADER.NUM_WORKERS = 4\n","\n","if pretrained_model_choice == \"Model_from_file\":\n"," cfg.MODEL.WEIGHTS = pretrained_model_path+\"/model_final.pth\" # Let training initialize from model zoo\n","\n","if not pretrained_model_choice == \"Model_from_file\":\n"," cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(h5_file_path) # Let training initialize from model zoo\n","\n","cfg.SOLVER.IMS_PER_BATCH = int(batch_size)\n","cfg.SOLVER.BASE_LR = initial_learning_rate\n","\n","cfg.SOLVER.WARMUP_ITERS = 1000\n","cfg.SOLVER.MAX_ITER = int(number_of_iteration) #adjust up if val mAP is still rising, adjust down if overfit\n","cfg.SOLVER.STEPS = (1000, 1500)\n","cfg.SOLVER.GAMMA = 0.05\n","\n","cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512\n","\n","if pretrained_model_choice == \"Faster R-CNN\":\n"," cfg.MODEL.ROI_HEADS.NUM_CLASSES = (number_of_labels) \n","\n","if pretrained_model_choice == \"RetinaNet\":\n"," cfg.MODEL.RETINANET.NUM_CLASSES = (number_of_labels) \n","\n","cfg.TEST.EVAL_PERIOD = 500\n","trainer = CocoTrainer(cfg)\n","\n","trainer.resume_or_load(resume=False)\n","trainer.train()\n","\n","#Save the config file after trainning\n","config= cfg.dump() # print formatted configs\n","\n","file1 = open(full_model_path+\"/config.yaml\", 'w') \n"," \n","file1.writelines(config) \n","file1.close() #to change file access modes\n","\n","#Save the label file after trainning\n","\n","dt = time.time() - start\n","mins, sec = divmod(dt, 60) \n","hour, mins = divmod(mins, 60) \n","print(\"Time elapsed:\",hour, \"hour(s)\",mins,\"min(s)\",round(sec),\"sec(s)\")\n","\n"]},{"cell_type":"markdown","metadata":{"id":"Vd9igRYvSnTr"},"source":["## **4.2. Download your model(s) from Google Drive**\n","---\n","\n","Once training is complete, the trained model is automatically saved on your Google Drive, in the **model_path** folder that was selected in Section 3. It is however wise to download the folder as all data can be erased at the next training if using the same folder."]},{"cell_type":"markdown","metadata":{"id":"sTMDT1u7rK9g"},"source":["# **5. Evaluate your model**\n","---\n","\n","This section allows the user to perform important quality checks on the validity and generalisability of the trained model. 
Detectron 2 requires you to reload your training dataset in order to perform the quality control step.\n","\n","**We highly recommend to perform quality control on all newly trained models.**\n","\n"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"OVxLyPyPiv85"},"outputs":[],"source":["# model name and path\n","#@markdown ###Do you want to assess the model you just trained ?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder as well as the location of your training dataset:\n","\n","#@markdown ####Path to trained model to be assessed: \n","\n","QC_model_folder = \"\" #@param {type:\"string\"}\n","\n","#@markdown ####Path to the image(s) used for training: \n","Training_source = \"\" #@param {type:\"string\"}\n","Training_target = \"\" #@param {type:\"string\"}\n","\n","\n","#Here we define the loaded model name and path\n","QC_model_name = os.path.basename(QC_model_folder)\n","QC_model_path = os.path.dirname(QC_model_folder)\n","\n","\n","if (Use_the_current_trained_model): \n"," QC_model_name = model_name\n"," QC_model_path = model_path\n","\n","\n","full_QC_model_path = QC_model_path+'/'+QC_model_name+'/'\n","if os.path.exists(full_QC_model_path):\n"," print(\"The \"+QC_model_name+\" network will be evaluated\")\n","\n","else: \n"," print(bcolors.WARNING + '!! WARNING: The chosen model does not exist !!')\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n","\n","# Here we load the list of classes stored in the model folder\n","list_of_labels_QC =[]\n","with open(full_QC_model_path+'labels.txt', newline='') as csvfile:\n"," reader = csv.reader(csvfile)\n"," for row in csv.reader(csvfile):\n"," list_of_labels_QC.append(row[0])\n","\n","#Here we create a list of color for later display\n","color_list = []\n","for i in range(len(list_of_labels_QC)):\n"," color = list(np.random.choice(range(256), size=3))\n"," color_list.append(color)\n","\n","#Save the list of labels as text file \n","if not (Use_the_current_trained_model):\n"," with open(base_path + '/labels.txt', 'w') as f:\n"," for item in list_of_labels_QC:\n"," print(item, file=f)\n","\n"," # Here we split the data between training and validation\n"," # Here we count the number of files in the training target folder\n"," Filelist = os.listdir(Training_target)\n"," number_files = len(Filelist)\n"," percentage_validation= 10\n","\n"," File_for_validation = int((number_files)/percentage_validation)+1\n","\n"," #Here we split the training dataset between training and validation\n"," # Everything is copied in the 'base_path' Folder\n","\n"," Training_source_temp = base_path + \"/training_source\"\n","\n"," if os.path.exists(Training_source_temp):\n"," shutil.rmtree(Training_source_temp)\n"," os.makedirs(Training_source_temp)\n","\n"," Training_target_temp = base_path + \"/training_target\"\n"," if os.path.exists(Training_target_temp):\n"," shutil.rmtree(Training_target_temp)\n"," os.makedirs(Training_target_temp)\n","\n"," Validation_source_temp = base_path + \"/validation_source\"\n","\n"," if os.path.exists(Validation_source_temp):\n"," shutil.rmtree(Validation_source_temp)\n"," os.makedirs(Validation_source_temp)\n","\n"," Validation_target_temp = base_path + \"/validation_target\"\n"," if os.path.exists(Validation_target_temp):\n"," shutil.rmtree(Validation_target_temp)\n"," os.makedirs(Validation_target_temp)\n","\n"," list_source = 
os.listdir(os.path.join(Training_source))\n"," list_target = os.listdir(os.path.join(Training_target))\n","\n","#Move files into the temporary source and target directories:\n"," \n"," for f in os.listdir(os.path.join(Training_source)):\n"," shutil.copy(Training_source+\"/\"+f, Training_source_temp+\"/\"+f)\n","\n"," for p in os.listdir(os.path.join(Training_target)):\n"," shutil.copy(Training_target+\"/\"+p, Training_target_temp+\"/\"+p)\n","\n"," list_source_temp = os.listdir(os.path.join(Training_source_temp))\n"," list_target_temp = os.listdir(os.path.join(Training_target_temp))\n","\n","\n","#Here we move images to be used for validation\n"," for i in range(File_for_validation):\n","\n"," name = list_source_temp[i]\n"," shutil.move(Training_source_temp+\"/\"+name, Validation_source_temp+\"/\"+name)\n","\n"," shortname_no_extension = name[:-4]\n","\n"," shutil.move(Training_target_temp+\"/\"+shortname_no_extension+\".xml\", Validation_target_temp+\"/\"+shortname_no_extension+\".xml\")\n","\n","\n","#First we need to create list of labels to generate the json dictionaries\n","\n"," list_source_training_temp = os.listdir(os.path.join(Training_source_temp))\n"," list_source_validation_temp = os.listdir(os.path.join(Validation_source_temp))\n","\n"," name_no_extension_training = []\n"," for n in list_source_training_temp:\n"," name_no_extension_training.append(os.path.splitext(n)[0])\n","\n"," name_no_extension_validation = []\n"," for n in list_source_validation_temp:\n"," name_no_extension_validation.append(os.path.splitext(n)[0])\n","\n","#Save the list of labels as text file\n","\n"," with open(base_path + '/training_files.txt', 'w') as f:\n"," for item in name_no_extension_training:\n"," print(item, end='\\n', file=f)\n","\n"," with open(base_path + '/validation_files.txt', 'w') as f:\n"," for item in name_no_extension_validation:\n"," print(item, end='\\n', file=f)\n","\n"," file_output_training = Training_target_temp+\"/output.json\"\n"," file_output_validation = Validation_target_temp+\"/output.json\"\n","\n"," os.chdir(base_path + \"\")\n"," !python voc2coco.py --ann_dir \"$Training_target_temp\" --output \"$file_output_training\" --ann_ids base_path + \"/training_files.txt\" --labels base_path + \"/labels.txt\" --ext xml\n"," !python voc2coco.py --ann_dir \"$Validation_target_temp\" --output \"$file_output_validation\" --ann_ids base_path + \"/validation_files.txt\" --labels base_path + \"/labels.txt\" --ext xml\n","\n"," os.chdir(\"/\")\n","\n","#Here we load the dataset to detectron2\n"," if cell_ran_QC_training_dataset == 0:\n"," from detectron2.data.datasets import register_coco_instances\n"," register_coco_instances(\"my_dataset_train\", {}, Training_target_temp+\"/output.json\", Training_source_temp)\n"," register_coco_instances(\"my_dataset_val\", {}, Validation_target_temp+\"/output.json\", Validation_source_temp)\n"," \n","#Failsafe for later\n","cell_ran_QC_training_dataset = 1"]},{"cell_type":"markdown","metadata":{"id":"WZDvRjLZu-Lm"},"source":["## **5.1. Inspection of the loss function**\n","---\n","\n","It is good practice to evaluate the training progress by studying if your model is slowly improving over time. 
The following cell will allow you to load Tensorboard and investigate how several metric evolved over time (iterations).\n","\n"," "]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"cap-cHIfNZnm"},"outputs":[],"source":["#@markdown ##Play the cell to load tensorboard\n","%load_ext tensorboard\n","%tensorboard --logdir \"$full_QC_model_path\""]},{"cell_type":"markdown","metadata":{"id":"lreUY7-SsGkI"},"source":["## **5.2. Error mapping and quality metrics estimation**\n","---\n","\n","This section will compare the predictions generated by your model against ground-truth. Additionally, the below cell will show the mAP value of the model on the QC data If you want to read in more detail about this score, we recommend [this brief explanation](https://medium.com/@jonathan_hui/map-mean-average-precision-for-object-detection-45c121a31173).\n","\n"," The images provided in the \"Source_QC_folder\" and \"Target_QC_folder\" should contain images (e.g. as .png) and annotations (.xml files)!\n","\n","\n","**mAP score:** This refers to the mean average precision of the model on the given dataset. This value gives an indication how precise the predictions of the classes on this dataset are when compared to the ground-truth. Values closer to 1 indicate a good fit.\n"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"kjbHJHbtsg2R"},"outputs":[],"source":["#@markdown ##Choose the folders that contain your Quality Control dataset\n","\n","Source_QC_folder = \"\" #@param{type:\"string\"}\n","Target_QC_folder = \"\" #@param{type:\"string\"}\n","\n","if cell_ran_QC_QC_dataset == 0:\n","#Save the list of labels as text file \n"," with open(base_path + '/labels_QC.txt', 'w') as f:\n"," for item in list_of_labels_QC:\n"," print(item, file=f)\n","\n","#Here we create temp folder for the QC\n","\n"," QC_source_temp = base_path + \"/QC_source\"\n","\n"," if os.path.exists(QC_source_temp):\n"," shutil.rmtree(QC_source_temp)\n"," os.makedirs(QC_source_temp)\n","\n"," QC_target_temp = base_path + \"/QC_target\"\n"," if os.path.exists(QC_target_temp):\n"," shutil.rmtree(QC_target_temp)\n"," os.makedirs(QC_target_temp)\n","\n","# Create a quality control/Prediction Folder\n"," if os.path.exists(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\"):\n"," shutil.rmtree(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n","\n"," os.makedirs(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n","\n","#Here we move the QC files to the temp\n","\n"," for f in os.listdir(os.path.join(Source_QC_folder)):\n"," shutil.copy(Source_QC_folder+\"/\"+f, QC_source_temp+\"/\"+f)\n","\n"," for p in os.listdir(os.path.join(Target_QC_folder)):\n"," shutil.copy(Target_QC_folder+\"/\"+p, QC_target_temp+\"/\"+p)\n","\n","#Here we convert the XML files into JSON\n","#Save the list of files\n","\n"," list_source_QC_temp = os.listdir(os.path.join(QC_source_temp))\n","\n"," name_no_extension_QC = []\n"," for n in list_source_QC_temp:\n"," name_no_extension_QC.append(os.path.splitext(n)[0])\n","\n"," with open(base_path + '/QC_files.txt', 'w') as f:\n"," for item in name_no_extension_QC:\n"," print(item, end='\\n', file=f)\n","\n","#Convert XML into JSON\n"," file_output_QC = QC_target_temp+\"/output.json\"\n","\n"," os.chdir(base_path + \"\")\n"," !python voc2coco.py --ann_dir \"$QC_target_temp\" --output \"$file_output_QC\" --ann_ids base_path + \"/QC_files.txt\" --labels base_path + \"/labels.txt\" --ext xml\n","\n"," 
os.chdir(\"/\")\n","\n","\n","#Here we register the QC dataset\n"," register_coco_instances(\"my_dataset_QC\", {}, QC_target_temp+\"/output.json\", QC_source_temp)\n"," cell_ran_QC_QC_dataset = 1\n","\n","\n","#Load the model to use\n","cfg = get_cfg()\n","cfg.merge_from_file(full_QC_model_path+\"config.yaml\")\n","cfg.MODEL.WEIGHTS = os.path.join(full_QC_model_path, \"model_final.pth\")\n","cfg.DATASETS.TEST = (\"my_dataset_QC\", )\n","cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5\n","\n","#Metadata\n","test_metadata = MetadataCatalog.get(\"my_dataset_QC\")\n","test_metadata.set(thing_color = color_list)\n","\n","# For the evaluation we need to load the trainer\n","trainer = CocoTrainer(cfg)\n","trainer.resume_or_load(resume=True)\n","\n","# Here we need to load the predictor\n","\n","predictor = DefaultPredictor(cfg)\n","evaluator = COCOEvaluator(\"my_dataset_QC\", cfg, False, output_dir=QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Prediction\")\n","val_loader = build_detection_test_loader(cfg, \"my_dataset_QC\")\n","inference_on_dataset(trainer.model, val_loader, evaluator)\n","\n","\n","print(\"A prediction is displayed\")\n","\n","dataset_QC_dicts = DatasetCatalog.get(\"my_dataset_QC\")\n","\n","for d in random.sample(dataset_QC_dicts, 1):\n"," print(\"Ground Truth\")\n"," img = cv2.imread(d[\"file_name\"])\n"," visualizer = Visualizer(img[:, :, ::-1], metadata=test_metadata, instance_mode=ColorMode.SEGMENTATION, scale=0.5)\n"," vis = visualizer.draw_dataset_dict(d)\n"," cv2_imshow(vis.get_image()[:, :, ::-1])\n","\n"," print(\"A prediction is displayed\")\n"," im = cv2.imread(d[\"file_name\"])\n"," outputs = predictor(im)\n"," v = Visualizer(im[:, :, ::-1],\n"," metadata=test_metadata,\n"," instance_mode=ColorMode.SEGMENTATION, \n"," scale=0.5\n"," )\n"," out = v.draw_instance_predictions(outputs[\"instances\"].to(\"cpu\"))\n"," cv2_imshow(out.get_image()[:, :, ::-1])\n","\n","cell_ran_QC_QC_dataset = 1"]},{"cell_type":"markdown","metadata":{"id":"DWAhOBc7gpzN"},"source":["# **6. Using the trained model**\n","\n","---\n","\n","In this section the unseen data is processed using the trained model (in section 4). First, your unseen images are uploaded and prepared for prediction. After that your trained model from section 4 is activated and finally saved into your Google Drive."]},{"cell_type":"markdown","metadata":{"id":"KAILvLGFS2-1"},"source":["## **6.1. Generate prediction(s) from unseen dataset**\n","---\n","\n","The current trained model (from section 4.2) can now be used to process images. If an older model needs to be used, please untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. 
Predictions are saved in your **Result_folder** as one CSV file per image, listing the bounding-box coordinates, box width and height, class and score of each detected object; an overlay of the last processed image is also saved there.\n","\n","**`Data_folder`:** This folder should contain the images that you want to process using the trained network.\n","\n","**`Result_folder`:** This folder will contain the prediction results."]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"lp-cx8TDIGI-"},"outputs":[],"source":["\n","#@markdown ### Provide the path to your dataset and to the folder where the prediction will be saved, then play the cell to predict output on your unseen images.\n","\n","#@markdown ###Path to data to analyse and where predicted output should be saved:\n","Data_folder = \"\" #@param {type:\"string\"}\n","Result_folder = \"\" #@param {type:\"string\"}\n","\n","# model name and path\n","#@markdown ###Do you want to use the current trained model?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the path to the model folder as well as the location of your training dataset:\n","\n","#@markdown ####Path to trained model to be assessed: \n","\n","Prediction_model_folder = \"\" #@param {type:\"string\"}\n","\n","\n","#Here we find the loaded model name and parent path\n","Prediction_model_name = os.path.basename(Prediction_model_folder)\n","Prediction_model_path = os.path.dirname(Prediction_model_folder)\n","\n","if (Use_the_current_trained_model): \n"," print(\"Using current trained network\")\n"," Prediction_model_name = model_name\n"," Prediction_model_path = model_path\n","\n","full_Prediction_model_path = Prediction_model_path+'/'+Prediction_model_name+'/'\n","\n","\n","if os.path.exists(full_Prediction_model_path):\n"," print(\"The \"+Prediction_model_name+\" network will be used.\")\n","else:\n"," print(bcolors.WARNING +'!! WARNING: The chosen model does not exist !!')\n"," print('Please make sure you provide a valid model path and model name before proceeding further.')\n","\n","#Here we load the label file saved with the model\n","\n","list_of_labels_predictions =[]\n","with open(full_Prediction_model_path+'labels.txt', newline='') as csvfile:\n"," reader = csv.reader(csvfile)\n"," for row in csv.reader(csvfile):\n","   list_of_labels_predictions.append(row[0])\n","\n","#Here we create a list of colors for later display\n","color_list = []\n","for i in range(len(list_of_labels_predictions)):\n"," color = list(np.random.choice(range(256), size=3))\n"," color_list.append(color)\n","\n","#Activate the pretrained model. 
\n","# Create config\n","cfg = get_cfg()\n","cfg.merge_from_file(full_Prediction_model_path+\"config.yaml\")\n","cfg.MODEL.WEIGHTS = os.path.join(full_Prediction_model_path, \"model_final.pth\")\n","cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model\n","\n","# Create predictor\n","predictor = DefaultPredictor(cfg)\n","\n","#Load the metadata from the prediction file\n","prediction_metadata = Metadata()\n","prediction_metadata.set(thing_classes = list_of_labels_predictions)\n","prediction_metadata.set(thing_color = color_list)\n","\n","start = datetime.now()\n","\n","validation_folder = Path(Data_folder)\n","\n","for i, file in enumerate(validation_folder.glob(\"*.png\")):\n"," # this loop opens the .png files from the val-folder, creates a dict with the file\n"," # information, plots visualizations and saves the result as .pkl files.\n"," file = str(file)\n"," file_name = file.split(\"/\")[-1]\n"," im = cv2.imread(file)\n","\n"," #Prediction are done here\n"," outputs = predictor(im)\n","\n"," #here we extract the results into numpy arrays\n","\n"," Classes_predictions = outputs[\"instances\"].pred_classes.cpu().data.numpy()\n","\n"," boxes_predictions = outputs[\"instances\"].pred_boxes.tensor.cpu().numpy()\n"," Score_predictions = outputs[\"instances\"].scores.cpu().data.numpy()\n"," \n"," #here we save the results into a csv file\n"," prediction_csv = Result_folder+\"/\"+file_name+\"_predictions.csv\"\n","\n"," with open(prediction_csv, 'w') as f:\n"," writer = csv.writer(f)\n"," writer.writerow(['x1','y1','x2','y2','box width','box height', 'class', 'score' ]) \n","\n"," for i in range(len(boxes_predictions)):\n","\n"," x1 = boxes_predictions[i][0]\n"," y1 = boxes_predictions[i][1]\n"," x2 = boxes_predictions[i][2]\n"," y2 = boxes_predictions[i][3]\n"," box_width = x2 - x1\n"," box_height = y2 -y1\n","\n"," writer.writerow([str(x1), str(y1), str(x2), str(y2), str(box_width), str(box_height), str(list_of_labels_predictions[Classes_predictions[i]]), Score_predictions[i]])\n","\n","\n","# The last example is displayed \n","v = Visualizer(im, metadata=prediction_metadata, instance_mode=ColorMode.SEGMENTATION, scale=1)\n","v = v.draw_instance_predictions(outputs[\"instances\"].to(\"cpu\")) \n","plt.figure(figsize=(20,20))\n","plt.imshow(v.get_image()[:, :, ::-1])\n","plt.axis('off');\n","plt.savefig(Result_folder+\"/\"+file_name)\n"," \n","print(\"Time needed for inferencing:\", datetime.now() - start)\n","\n"]},{"cell_type":"markdown","metadata":{"id":"wgO7Ok1PBFQj"},"source":["## **6.2. Download your predictions**\n","---\n","\n","**Store your data** and ALL its results elsewhere by downloading it from Google Drive and after that clean the original folder tree (datasets, results, trained model etc.) if you plan to train or use new networks. Please note that the notebook will otherwise **OVERWRITE** all files which have the same name."]},{"cell_type":"markdown","metadata":{"id":"ir5oDtGF-34t"},"source":["# **7. 
Version log**\n","---\n","**v1.13**: \n","\n","* The section 1 and 2 are now swapped for better export of *requirements.txt*.\n","\n","* This version also now includes built-in version check and the version log that you're reading now."]},{"cell_type":"markdown","metadata":{"id":"nlyPYwZu4VVS"},"source":["#**Thank you for using Detectron2 2D!**"]}],"metadata":{"accelerator":"GPU","colab":{"collapsed_sections":[],"name":"Detectron2_2D_ZeroCostDL4Mic.ipynb","provenance":[{"file_id":"1hzAI0joLETcG5sI2Qvo8AKDr0TWRKySJ","timestamp":1587653755731},{"file_id":"1QFcz4NnQv4rMwDNl7AzHajN-Ola9sUFW","timestamp":1586411847878},{"file_id":"12UDRQ7abcnXcf5FctR9IUStgCpBiQWn7","timestamp":1584466922281},{"file_id":"1zXCn3A39GI1MCnXK_g_Z-AWh9vkB0YhU","timestamp":1583244415636}]},"kernelspec":{"display_name":"Python 3","language":"python","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.6.9"}},"nbformat":4,"nbformat_minor":0} diff --git a/Colab_notebooks/EmbedSeg_2D_ZeroCostDL4Mic.ipynb b/Colab_notebooks/EmbedSeg_2D_ZeroCostDL4Mic.ipynb index c34849e6..679dbd6e 100644 --- a/Colab_notebooks/EmbedSeg_2D_ZeroCostDL4Mic.ipynb +++ b/Colab_notebooks/EmbedSeg_2D_ZeroCostDL4Mic.ipynb @@ -277,6 +277,10 @@ "#Here we define where all the temporary files are saved\n", "\n", "import os\n", + "\n", + "#Create a variable to get and store relative base path\n", + "base_path = os.getcwd()\n", + "\n", "data_dir = os.path.join(os.path.abspath(os.getcwd()), \"data_dir\")\n", "print (\"Data directory path is set as {}\" .format(data_dir))\n", "#============================================================\n", @@ -1401,15 +1405,7 @@ "id": "01Djr8v-5pPk", "outputId": "abe238c9-b9b5-4996-f7db-59eedbf7d8c6" }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Mounted at /content/gdrive\n" - ] - } - ], + "outputs": [], "source": [ "#@markdown ##Play the cell to connect your Google Drive to Colab\n", "\n", @@ -1425,7 +1421,7 @@ "\n", "# mount user's Google Drive to Google Colab.\n", "from google.colab import drive\n", - "drive.mount('/content/gdrive')\n", + "drive.mount(base_path + '/gdrive')\n", "\n", "\n", "\n" @@ -2071,7 +2067,7 @@ " Training_target_dir = Training_target\n", "\n", "#Here we split the training dataset between training, validation and test and we neatly organise everything into folders\n", - "# Everything is copied in the /Content Folder\n", + "# Everything is copied in the base_path Folder\n", "\n", "print('--------------------------------------------------------------------------------------------------------')\n", "print('Creating a temporary directory...')\n", diff --git a/Colab_notebooks/MaskRCNN_ZeroCostDL4Mic.ipynb b/Colab_notebooks/MaskRCNN_ZeroCostDL4Mic.ipynb index d720d604..76811445 100644 --- a/Colab_notebooks/MaskRCNN_ZeroCostDL4Mic.ipynb +++ b/Colab_notebooks/MaskRCNN_ZeroCostDL4Mic.ipynb @@ -1 +1 @@ -{"cells":[{"cell_type":"markdown","metadata":{"id":"YrTo6T74i7s0"},"source":["# **MaskRCNN**\n","\n","---\n","\n"," This notebook is an implementation of MaskRCNN. This neural network performs instance segmentation. This means it can be used to detect objects in images, segment these objects and classify them. 
This notebook is based on the work of [He et al.](https://arxiv.org/abs/1703.06870)\n","\n","---\n","\n","*Disclaimer*:\n","\n","This notebook is part of the *Zero-Cost Deep-Learning to Enhance Microscopy* project (ZeroCostDL4Mic) (https://github.com/HenriquesLab/DeepLearning_Collab/wiki)\n","\n","This notebook is based on the following paper: \n","\n","**Mask R-CNN**, arxiv, 2018 by Kaiming He, Georgia Gkioxari, Piotr Dollár, Ross Girshick [here](https://arxiv.org/abs/1703.06870)\n","\n","And source code found in: *https://github.com/matterport/Mask_RCNN*\n","\n","Provide information on dataset availability and link for download if applicable.\n","\n","\n","**Please also cite this original paper when using or developing this notebook.**"]},{"cell_type":"markdown","metadata":{"id":"RZL8pqcEi0KY"},"source":["# **How to use this notebook?**\n","\n","---\n","\n","Video describing how to use ZeroCostDL4Mic notebooks are available on youtube:\n"," - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run through of the workflow to obtain the notebooks and the provided test datasets as well as a common use of the notebook\n"," - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook\n","\n","\n","---\n","###**Structure of a notebook**\n","\n","The notebook contains two types of cell: \n","\n","**Text cells** provide information and can be modified by douple-clicking the cell. You are currently reading the text cell. You can create a new text by clicking `+ Text`.\n","\n","**Code cells** contain code and the code can be modfied by selecting the cell. To execute the cell, move your cursor on the `[ ]`-mark on the left side of the cell (play button appears). Click to execute the cell. After execution is done the animation of play button stops. You can create a new coding cell by clicking `+ Code`.\n","\n","---\n","###**Table of contents, Code snippets** and **Files**\n","\n","On the top left side of the notebook you find three tabs which contain from top to bottom:\n","\n","*Table of contents* = contains structure of the notebook. Click the content to move quickly between sections.\n","\n","*Code snippets* = contain examples how to code certain tasks. You can ignore this when using this notebook.\n","\n","*Files* = contain all available files. After mounting your google drive (see section 1.) you will find your files and folders here. \n","\n","**Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive-button; your Google Drive is connected in section 1.2.\n","\n","**Note:** The \"sample data\" in \"Files\" contains default files. Do not upload anything in here!\n","\n","---\n","###**Making changes to the notebook**\n","\n","**You can make a copy** of the notebook and save it to your Google Drive. To do this click file -> save a copy in drive.\n","\n","To **edit a cell**, double click on the text. This will show you either the source code (in code cells) or the source text (in text cells).\n","You can use the `#`-mark in code cells to comment out parts of the code. This allows you to keep the original code piece in the cell as a comment."]},{"cell_type":"markdown","metadata":{"id":"3yywetML0lUX"},"source":["#**0. Before getting started**\n","---\n","**We strongly recommend that you generate extra paired images. 
These images can be used to assess the quality of your trained model (Quality control dataset)**. The quality control assessment can be done directly in this notebook.\n","\n"," **Additionally, the corresponding input and output files need to have the same name**.\n","\n"," Please note that while the file format is flexible (.tif, .png, .jpeg should all work) but these currently **must be of RGB** type.\n","\n","Here's the data structure that you should use:\n","* Experiment A\n"," - **Training dataset**\n"," - Training\n"," - img_1.png, img_1.png.csv, img_2.png, img_2.png.csv, ...\n"," - Validation\n"," - img_a.png, img_a.png.csv, img_b.png, img_b.png.csv,...\n"," - **Quality control dataset**\n"," - Validation\n"," - img_a.png, img_a.png.csv, img_b.png, img_b.png.csv\n"," - **Data to be predicted**\n"," - **Results**\n","\n","---\n","\n"," **Note: This notebook is still in the beta stage.\n","Currently, the notebook works only if the annotation files are in csv format with the following columns:**\n","\n","***| filename | width | height | object_index | class_name | x | y |***\n","\n","where each row in the csv will provide the coordinates **(x,y)** of an edge point in the segmentation mask of an individual object with a dedicated **object_index** (e.g. 1, 2, 3....) and its **class_name** (e.g. 'nucleus' or 'horse' etc.) on the image of dimensions **width** x **height** (pixels). If you already have a dataset with segmentation masks we can provide a fiji macro that can convert the dataset into the correct format.\n","*We are actively working on integrating more flexibility into the annotations this notebook can be used with.*\n","\n","---\n","\n","**Important note**\n","\n","- If you wish to **Train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.\n","\n","- If you wish to **Evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.\n","\n","- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.\n","---"]},{"cell_type":"markdown","metadata":{"id":"ffNw8dIQjftT"},"source":["# **1. Install MaskRCNN and dependencies**\n","---\n"]},{"cell_type":"markdown","metadata":{"id":"iYBjQqd95MpG"},"source":["## **1.1. Install key dependencies**\n","---\n"," "]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"UTratIh3-Zl_"},"outputs":[],"source":["#@markdown ##Install MaskRCNN and dependencies\n","!pip install fpdf2\n","!pip install imgaug\n","!pip install h5py==2.10\n","!git clone https://github.com/matterport/Mask_RCNN\n","\n","#Force session restart\n","exit(0)"]},{"cell_type":"markdown","metadata":{"id":"c3JUL5cQ5cY-"},"source":["## **1.2. Restart your runtime**\n","---\n","\n","\n","\n","** Ignore the following message error message. Your Runtime has automatically restarted. This is normal.**\n","\n","\"\"
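As a concrete illustration of the annotation format described in section 0 above, the sketch below writes a hypothetical `img_1.png.csv` describing a single triangular "nucleus" on a 256x256 image. Note that `load_image_csv()` in section 1.3 appears to read the class name from the fourth column and the object index from the fifth (i.e. `filename,width,height,class_name,object_index,x,y`), which differs from the column order listed above, so it is worth checking your own files against the parser.

    import csv

    # Hypothetical annotation file: one triangular "nucleus" (object_index 1) on a
    # 256x256 image, one polygon vertex (x, y) per row. Column order follows the
    # indices used by load_image_csv() in section 1.3.
    rows = [
        ["img_1.png", 256, 256, "nucleus", 1, 10, 10],
        ["img_1.png", 256, 256, "nucleus", 1, 40, 10],
        ["img_1.png", 256, 256, "nucleus", 1, 25, 40],
    ]

    with open("img_1.png.csv", "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["filename", "width", "height", "class_name", "object_index", "x", "y"])
        writer.writerows(rows)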
\n"]},{"cell_type":"markdown","metadata":{"id":"eLGtfVWE6lu9"},"source":["## **1.3. Load key dependencies**\n","---\n"," "]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"laDhajuKOs9t"},"outputs":[],"source":["Notebook_version = '1.13'\n","Network = 'MaskRCNN'\n","\n","from builtins import any as b_any\n","\n","def get_requirements_path():\n"," # Store requirements file in 'contents' directory\n"," current_dir = os.getcwd()\n"," dir_count = current_dir.count('/') - 1\n"," path = '../' * (dir_count) + 'requirements.txt'\n"," return path\n","\n","def filter_files(file_list, filter_list):\n"," filtered_list = []\n"," for fname in file_list:\n"," if b_any(fname.split('==')[0] in s for s in filter_list):\n"," filtered_list.append(fname)\n"," return filtered_list\n","\n","def build_requirements_file(before, after):\n"," path = get_requirements_path()\n","\n"," # Exporting requirements.txt for local run\n"," !pip freeze > $path\n","\n"," # Get minimum requirements file\n"," df = pd.read_csv(path)\n"," mod_list = [m.split('.')[0] for m in after if not m in before]\n"," req_list_temp = df.values.tolist()\n"," req_list = [x[0] for x in req_list_temp]\n","\n"," # Replace with package name and handle cases where import name is different to module name\n"," mod_name_list = [['sklearn', 'scikit-learn'], ['skimage', 'scikit-image']]\n"," mod_replace_list = [[x[1] for x in mod_name_list] if s in [x[0] for x in mod_name_list] else s for s in mod_list]\n"," filtered_list = filter_files(req_list, mod_replace_list)\n","\n"," file=open(path,'w')\n"," for item in filtered_list:\n"," file.writelines(item)\n","\n"," file.close()\n","\n","import sys\n","before = [str(m) for m in sys.modules]\n","\n","#@markdown ##Load Key Dependencies\n","%tensorflow_version 1.x\n","\n","import os\n","import sys\n","import json\n","import datetime\n","import time\n","import numpy as np\n","import skimage.draw\n","from skimage import io\n","import imgaug\n","import pandas as pd\n","import csv\n","import random\n","import datetime\n","import shutil\n","from matplotlib import pyplot as plt\n","import matplotlib.lines as lines\n","from matplotlib.patches import Polygon\n","import IPython.display\n","from PIL import Image, ImageDraw, ImageFont\n","from fpdf import FPDF, HTMLMixin \n","from pip._internal.operations.freeze import freeze\n","import subprocess as sp\n","\n","# Root directory of the project\n","ROOT_DIR = os.path.abspath(\"/content\")\n","# !git clone https://github.com/matterport/Mask_RCNN\n","# Import Mask RCNN\n","sys.path.append(ROOT_DIR) # To find local version of the library\n","os.chdir('/content/Mask_RCNN')\n","\n","#Here we need to replace \"self.keras_model.metrics_tensors.append(loss)\" with \"self.keras_model.add_metric(loss, name)\"\n","# in model.py line 2199, otherwise we get version issues.\n","from tempfile import mkstemp\n","from shutil import move, copymode\n","from os import fdopen, remove\n","#This function replaces the old default files with new values\n","def replace(file_path, pattern, subst):\n"," #Create temp file\n"," fh, abs_path = mkstemp()\n"," with fdopen(fh,'w') as new_file:\n"," with open(file_path) as old_file:\n"," for line in old_file:\n"," new_file.write(line.replace(pattern, subst))\n"," #Copy the file permissions from the old file to the new file\n"," copymode(file_path, abs_path)\n"," #Remove original file\n"," remove(file_path)\n"," #Move new file\n"," move(abs_path, 
file_path)\n","\n","replace(\"/content/Mask_RCNN/mrcnn/model.py\",'self.keras_model.metrics_tensors.append(loss)','self.keras_model.add_metric(loss, name)')\n","#replace(\"/content/Mask_RCNN/mrcnn/model.py\", \"save_weights_only=True),\", \"save_weights_only=True),\\n keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor = 0.1, patience = 30, min_lr = 0, verbose = 1)\")\n","#replace(\"/content/Mask_RCNN/mrcnn/model.py\", \"save_weights_only=True),\", \"save_weights_only=True),\\n keras.callbacks.CSVLogger('/content/results.csv'),\")\n","replace(\"/content/Mask_RCNN/mrcnn/model.py\",'workers = 0','workers = 1')\n","replace(\"/content/Mask_RCNN/mrcnn/model.py\",'workers = multiprocessing.cpu_count()','workers = 1')\n","replace(\"/content/Mask_RCNN/mrcnn/model.py\",'use_multiprocessing=True','use_multiprocessing=False')\n","replace(\"/content/Mask_RCNN/mrcnn/utils.py\",\"shift = np.array([0, 0, 1, 1])\",\"shift = np.array([0., 0., 1., 1.])\")\n","replace(\"/content/Mask_RCNN/mrcnn/visualize.py\", \"i += 1\",\"i += 1\\n plt.savefig('/content/TrainingDataExample_MaskRCNN.png',bbox_inches='tight',pad_inches=0)\")\n","#replace(\"/content/Mask_RCNN/mrcnn/model.py\",\" class_ids\",\" if config.NUM_CLASSES == 2:\\n class_ids = tf.ones_like(probs[:, 0], dtype=tf.int32)\\n else:\\n class_ids\")\n","\n","#Using this command will allow display of detections below the 0.5 score threshold, if only 1 class beyond background is in the dataset\n","replace(\"/content/Mask_RCNN/mrcnn/model.py\",\"class_ids = tf.argmax(probs\",\"if config.NUM_CLASSES >= 2:\\n class_ids = tf.ones_like(probs[:, 0], dtype=tf.int32)\\n else:\\n class_ids = tf.argmax(probs\")\n","\n","\n","from mrcnn.config import Config\n","from mrcnn import model as modellib, utils\n","from mrcnn import visualize\n","from mrcnn.model import log\n","from mrcnn import utils\n","\n","def get_ax(rows=1, cols=1, size=8):\n"," \"\"\"Return a Matplotlib Axes array to be used in\n"," all visualizations in the notebook. 
Provide a\n"," central point to control graph sizes.\n"," \n"," Change the default size attribute to control the size\n"," of rendered images\n"," \"\"\"\n"," _, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))\n"," return ax\n","\n","############################################################\n","# Dataset\n","############################################################\n","\n","class ClassDataset(utils.Dataset):\n"," def load_coco(annotation_file):\n"," dataset = json.load(open(annotation_file, 'r'))\n"," assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset))\n"," self.dataset = dataset\n"," self.createIndex()\n","\n"," def createIndex(self):\n"," # create index\n"," print('creating index...')\n"," anns, cats, imgs = {}, {}, {}\n"," imgToAnns,catToImgs = defaultdict(list),defaultdict(list)\n"," if 'annotations' in self.dataset:\n"," for ann in self.dataset['annotations']:\n"," imgToAnns[ann['image_id']].append(ann)\n"," anns[ann['id']] = ann\n","\n"," if 'images' in self.dataset:\n"," for img in self.dataset['images']:\n"," imgs[img['id']] = img\n","\n"," if 'categories' in self.dataset:\n"," for cat in self.dataset['categories']:\n"," cats[cat['id']] = cat\n","\n"," if 'annotations' in self.dataset and 'categories' in self.dataset:\n"," for ann in self.dataset['annotations']:\n"," catToImgs[ann['category_id']].append(ann['image_id'])\n","\n"," print('index created!')\n","\n"," # create class members\n"," self.anns = anns\n"," self.imgToAnns = imgToAnns\n"," self.catToImgs = catToImgs\n"," self.imgs = imgs\n"," self.cats = cats\n","\n"," def load_class(self, dataset_dir, subset):\n"," \"\"\"Load a subset of the dataset.\n"," dataset_dir: Root directory of the dataset.\n"," subset: Subset to load: train or val\n"," \"\"\"\n","\n"," # Add classes. We have only one class to add.\n"," self.add_class(\"Training_Datasets\", 1, \"nucleus\")\n"," \n"," # Train or validation dataset?\n"," assert subset in [\"Training\", \"Validation\"]\n"," dataset_dir = os.path.join(dataset_dir, subset)\n","\n"," # Load annotations\n"," # VGG Image Annotator (up to version 1.6) saves each image in the form:\n"," # { 'filename': '28503151_5b5b7ec140_b.jpg',\n"," # 'regions': {\n"," # '0': {\n"," # 'region_attributes': {},\n"," # 'shape_attributes': {\n"," # 'all_points_x': [...],\n"," # 'all_points_y': [...],\n"," # 'name': 'polygon'}},\n"," # ... more regions ...\n"," # },\n"," # 'size': 100202\n"," # }\n"," # We mostly care about the x and y coordinates of each region\n"," # Note: In VIA 2.0, regions was changed from a dict to a list.\n"," annotations = json.load(open(os.path.join(dataset_dir, \"birds071220220_json.json\")))\n"," annotations = list(annotations.values()) # don't need the dict keys\n"," \n"," # The VIA tool saves images in the JSON even if they don't have any\n"," # annotations. Skip unannotated images.\n"," annotations = [a for a in annotations if a['regions']]\n"," \n"," # Add images\n"," for a in annotations:\n"," # Get the x, y coordinaets of points of the polygons that make up\n"," # the outline of each object instance. 
These are stores in the\n"," # shape_attributes (see json format above)\n"," # The if condition is needed to support VIA versions 1.x and 2.x.\n"," if type(a['regions']) is dict:\n"," polygons = [r['shape_attributes'] for r in a['regions'].values()]\n"," else:\n"," polygons = [r['shape_attributes'] for r in a['regions']] \n","\n"," #Get the class of the object\n"," obj_class = [c['region_attributes']['species'] for c in a['regions']]\n","\n"," # load_mask() needs the image size to convert polygons to masks.\n"," # Unfortunately, VIA doesn't include it in JSON, so we must read\n"," # the image. This is only managable since the dataset is tiny.\n"," image_path = os.path.join(dataset_dir, a['filename'])\n"," image = skimage.io.imread(image_path)\n"," height, width = image.shape[:2]\n","\n"," self.add_image(\n"," \"Training_Datasets\",\n"," image_id=a['filename'], # use file name as a unique image id\n"," path=image_path,\n"," width=width, height=height,\n"," polygons=polygons,\n"," obj_class=obj_class)\n"," \n"," def load_image_csv(self, dataset_dir, subset):\n"," # Add classes. We have only one class to add.\n"," # self.add_class(\"Training_Datasets\", 1, \"nucleus\")\n"," #self.add_class(\"Training_Datasets\", 2, \"Great tit\")\n"," \n"," # Train or validation dataset?\n"," assert subset in [\"Training\", \"Validation\"]\n"," dataset_dir = os.path.join(dataset_dir, subset)\n"," #Data Format\n"," #csv file:\n"," #filename,width,height,object_index, class_name, x, y\n"," #file_1,256,256,1,nucleus, 1, 1\n"," #file_1,256,256,1,nucleus, 3, 10\n"," #file_1,256,256,1,nucleus, 1, 3\n"," #file_1,256,256,1,nucleus, 3, 7\n"," #file_1,256,256,2,nucleus, 17, 20\n"," #...\n"," class_index = 0\n"," obj_class_old = \"\"\n"," #class_names will hold all the classes we find in the dataset \n"," class_names = {obj_class_old:class_index}\n"," for csv_file_name in os.listdir(dataset_dir):\n"," if csv_file_name.endswith('.csv'):\n"," with open(os.path.join(dataset_dir,csv_file_name)) as csvfile_count:\n"," row_count = sum(1 for _ in csvfile_count)\n"," with open(os.path.join(dataset_dir,csv_file_name)) as csvfile:\n"," annotations = csv.reader(csvfile)\n"," next(annotations)\n"," polygons = []\n"," x_values = []\n"," y_values = []\n"," index_old = 1\n"," for line in annotations:\n"," img_file_name = line[0]\n"," index_new = int(line[4])\n"," obj_class = line[3]\n"," \n"," if not obj_class in class_names:\n"," class_index+=1\n"," class_names[obj_class] = class_index\n"," self.add_class(\"Training_Datasets\", class_index, obj_class)\n"," \n"," if index_new == index_old:\n"," x_values.append(int(line[5]))\n"," y_values.append(int(line[6]))\n"," \n"," if row_count == annotations.line_num:\n"," polygon = {\"class_name\":class_names[obj_class],\"all_points_x\":x_values,\"all_points_y\":y_values}\n"," polygons.append(polygon)\n"," \n"," elif index_new != index_old:\n"," polygon = {\"class_name\":class_names[obj_class_old],\"all_points_x\":x_values,\"all_points_y\":y_values}\n"," polygons.append(polygon)\n"," x_values = []\n"," x_values.append(int(line[5]))\n"," y_values = []\n"," y_values.append(int(line[6]))\n"," \n"," index_old = int(line[4])\n"," obj_class_old = line[3]\n"," image_path = os.path.join(dataset_dir,img_file_name)\n"," \n"," self.add_image(\n"," \"Training_Datasets\",\n"," image_id=img_file_name, # use file name as a unique image id\n"," path=image_path,\n"," width=int(line[1]), height=int(line[2]),\n"," polygons=polygons)\n"," #print(csv_file_name, class_index, polygons)\n"," return 
class_index\n","\n"," def load_mask(self, image_id):\n"," info = self.image_info[image_id]\n"," #print(info)\n"," mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n"," dtype=np.uint8)\n"," class_ids = []\n"," #class_index = 0\n"," for i, p in enumerate(info[\"polygons\"]):\n"," \n"," class_name = p['class_name']\n"," # class_names = {class_name:class_index}\n"," # if class_name != class_name_old:\n"," # class_index+=1\n"," # class_names[class_name] = class_index\n"," \n"," # Get indexes of pixels inside the polygon and set them to 1\n"," # print(p['y_values'])\n"," rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n"," mask[rr, cc, i] = 1\n"," \n"," #class_name_old = p['class_name']\n"," class_ids.append(class_name)\n"," \n"," class_ids = np.array(class_ids)\n","\n"," return mask.astype(np.bool), class_ids.astype(np.int32)\n","\n"," # def load_mask(self, image_id):\n"," # \"\"\"Generate instance masks for an image.\n"," # Returns:\n"," # masks: A bool array of shape [height, width, instance count] with\n"," # one mask per instance.\n"," # class_ids: a 1D array of class IDs of the instance masks.\n"," # \"\"\"\n"," # def clean_name(name):\n"," # \"\"\"Returns a shorter version of object names for cleaner display.\"\"\"\n"," # return \",\".join(name.split(\",\")[:1])\n","\n"," # # If not a balloon dataset image, delegate to parent class.\n"," # image_info = self.image_info[image_id]\n"," # if image_info[\"source\"] != \"Training_Datasets\":\n"," # return super(self.__class__, self).load_mask(image_id)\n","\n"," # # Convert polygons to a bitmap mask of shape\n"," # # [height, width, instance_count]\n"," # info = self.image_info[image_id]\n","\n"," # mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n"," # dtype=np.uint8)\n"," # for i, p in enumerate(info[\"polygons\"]):\n"," # # Get indexes of pixels inside the polygon and set them to 1\n"," # rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n"," # mask[rr, cc, i] = 1\n","\n"," # classes = info[\"obj_class\"]\n"," # class_list = [clean_name(c[\"name\"]) for c in self.class_info]\n"," # class_ids = np.array([class_list.index(s) for s in classes])\n","\n"," # # Return mask, and array of class IDs of each instance. Since we have\n"," # # one class ID only, we return an array of 1s\n"," # return mask.astype(np.bool), class_ids.astype(np.int32)#np.ones([mask.shape[-1]], dtype=np.int32)\n","\n"," def image_reference(self, image_id):\n"," \"\"\"Return the path of the image.\"\"\"\n"," info = self.image_info[image_id]\n"," if info[\"source\"] == \"Training_Datasets\":\n"," return info[\"path\"]\n"," else:\n"," super(self.__class__, self).image_reference(image_id)\n","\n","\n","def train(model, augmentation=True):\n"," \"\"\"Train the model.\"\"\"\n"," # Training dataset.\n"," dataset_train = ClassDataset()\n"," dataset_train.load_class('/content/gdrive/MyDrive/MaskRCNN/Training_Datasets', \"Training\")\n"," dataset_train.prepare()\n","\n"," # Validation dataset\n"," dataset_val = ClassDataset()\n"," dataset_val.load_class('/content/gdrive/MyDrive/MaskRCNN/Training_Datasets', \"Validation\")\n"," dataset_val.prepare()\n","\n"," if augmentation == True:\n"," augment = imgaug.augmenters.Sometimes(0.5, imgaug.augmenters.OneOf([imgaug.augmenters.Fliplr(0.5),\n"," imgaug.augmenters.Flipud(0.5),\n"," imgaug.augmenters.Affine(rotate=45)]))\n"," else:\n"," augment = None\n"," # *** This training schedule is an example. 
Update to your needs ***\n"," # Since we're using a very small dataset, and starting from\n"," # COCO trained weights, we don't need to train too long. Also,\n"," # no need to train all layers, just the heads should do it.\n"," print(\"Training network heads\")\n"," model.train(dataset_train, dataset_val,\n"," learning_rate=config.LEARNING_RATE,\n"," epochs=80,\n"," augmentation = augment,\n"," layers='heads')\n","\n","\n","def train_csv(model, training_folder, augmentation=True, epochs = 20, layers = 'heads'):\n"," \"\"\"Train the model.\"\"\"\n"," # Training dataset.\n"," dataset_train = ClassDataset()\n"," dataset_train.load_image_csv(training_folder, \"Training\")\n"," dataset_train.prepare()\n","\n"," # Validation dataset\n"," dataset_val = ClassDataset()\n"," dataset_val.load_image_csv(training_folder, \"Validation\")\n"," dataset_val.prepare()\n","\n"," if augmentation == True:\n"," augment = imgaug.augmenters.SomeOf((1,2),[imgaug.augmenters.OneOf([imgaug.augmenters.Affine(rotate=90),\n"," imgaug.augmenters.Affine(rotate=180),\n"," imgaug.augmenters.Affine(rotate=270)]),\n"," imgaug.augmenters.Fliplr(0.5),\n"," imgaug.augmenters.Flipud(0.5),\n"," imgaug.augmenters.Multiply((0.8, 1.5)),\n"," imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0))])\n"," else:\n"," augment = None\n"," # *** This training schedule is an example. Update to your needs ***\n"," # Since we're using a very small dataset, and starting from\n"," # COCO trained weights, we don't need to train too long. Also,\n"," # no need to train all layers, just the heads should do it.\n"," print(\"Training network heads\")\n"," model.train(dataset_train, dataset_val,\n"," learning_rate=config.LEARNING_RATE,\n"," epochs=epochs,\n"," augmentation = augment,\n"," layers=layers)\n","\n","def color_splash(image, mask):\n"," \"\"\"Apply color splash effect.\n"," image: RGB image [height, width, 3]\n"," mask: instance segmentation mask [height, width, instance count]\n"," Returns result image.\n"," \"\"\"\n"," # Make a grayscale copy of the image. 
The grayscale copy still\n"," # has 3 RGB channels, though.\n"," gray = skimage.color.gray2rgb(skimage.color.rgb2gray(image)) * 255\n"," # Copy color pixels from the original color image where mask is set\n"," if mask.shape[-1] > 0:\n"," # We're treating all instances as one, so collapse the mask into one layer\n"," mask = (np.sum(mask, -1, keepdims=True) >= 1)\n"," splash = np.where(mask, image, gray).astype(np.uint8)\n"," else:\n"," splash = gray.astype(np.uint8)\n"," return splash\n","\n","\n","def detect_and_color_splash(model, image_path=None, video_path=None):\n"," assert image_path or video_path\n","\n"," # Image or video?\n"," if image_path:\n"," # Run model detection and generate the color splash effect\n"," print(\"Running on {}\".format(args.image))\n"," # Read image\n"," image = skimage.io.imread(args.image)\n"," # Detect objects\n"," r = model.detect([image], verbose=1)[0]\n"," # Color splash\n"," splash = color_splash(image, r['masks'])\n"," # Save output\n"," file_name = \"splash_{:%Y%m%dT%H%M%S}.png\".format(datetime.datetime.now())\n"," skimage.io.imsave(file_name, splash)\n"," elif video_path:\n"," import cv2\n"," # Video capture\n"," vcapture = cv2.VideoCapture(video_path)\n"," width = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH))\n"," height = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT))\n"," fps = vcapture.get(cv2.CAP_PROP_FPS)\n","\n"," # Define codec and create video writer\n"," file_name = \"splash_{:%Y%m%dT%H%M%S}.avi\".format(datetime.datetime.now())\n"," vwriter = cv2.VideoWriter(file_name,\n"," cv2.VideoWriter_fourcc(*'MJPG'),\n"," fps, (width, height))\n","\n"," count = 0\n"," success = True\n"," while success:\n"," print(\"frame: \", count)\n"," # Read next image\n"," success, image = vcapture.read()\n"," if success:\n"," # OpenCV returns images as BGR, convert to RGB\n"," image = image[..., ::-1]\n"," # Detect objects\n"," r = model.detect([image], verbose=0)[0]\n"," # Color splash\n"," splash = color_splash(image, r['masks'])\n"," # RGB -> BGR to save image to video\n"," splash = splash[..., ::-1]\n"," # Add image to video writer\n"," vwriter.write(splash)\n"," count += 1\n"," vwriter.release()\n"," print(\"Saved to \", file_name)\n","\n","# Colors for the warning messages\n","class bcolors:\n"," WARNING = '\\033[31m'\n"," NORMAL = '\\033[0m'\n","\n","class ClassConfig(Config):\n"," \"\"\"Configuration for training on the toy dataset.\n"," Derives from the base Config class and overrides some values.\n"," \"\"\"\n"," # Give the configuration a recognizable name\n"," # We use a GPU with 12GB memory, which can fit two images.\n"," # Adjust down if you use a smaller GPU.\n"," IMAGES_PER_GPU = 1\n"," DETECTION_MIN_CONFIDENCE = 0\n"," NAME = \"nucleus\"\n"," # Backbone network architecture\n"," # Supported values are: resnet50, resnet101\n"," BACKBONE = \"resnet50\"\n"," # Input image resizing\n"," # Random crops of size 64x64\n"," IMAGE_RESIZE_MODE = \"crop\"\n"," IMAGE_MIN_DIM = 256\n"," IMAGE_MAX_DIM = 256\n"," IMAGE_MIN_SCALE = 2.0\n"," # Length of square anchor side in pixels\n"," RPN_ANCHOR_SCALES = (4, 8, 16, 32, 64)\n"," # ROIs kept after non-maximum supression (training and inference)\n"," POST_NMS_ROIS_TRAINING = 200\n"," POST_NMS_ROIS_INFERENCE = 400\n"," # Non-max suppression threshold to filter RPN proposals.\n"," # You can increase this during training to generate more propsals.\n"," RPN_NMS_THRESHOLD = 0.9\n"," # How many anchors per image to use for RPN training\n"," RPN_TRAIN_ANCHORS_PER_IMAGE = 64\n"," # Image mean (RGB)\n"," MEAN_PIXEL = 
np.array([43.53, 39.56, 48.22])\n"," # If enabled, resizes instance masks to a smaller size to reduce\n"," # memory load. Recommended when using high-resolution images.\n"," USE_MINI_MASK = True\n"," MINI_MASK_SHAPE = (56, 56) # (height, width) of the mini-mask\n"," TRAIN_ROIS_PER_IMAGE = 128\n"," # Maximum number of ground truth instances to use in one image\n"," MAX_GT_INSTANCES = 100\n"," # Max number of final detections per image\n"," DETECTION_MAX_INSTANCES = 200\n","\n","# Below we define a function which saves the predictions.\n","# It is from this branch:\n","# https://github.com/matterport/Mask_RCNN/commit/bc8f148b820ebd45246ed358a120c99b09798d71\n","\n","def save_image(image, image_name, boxes, masks, class_ids, scores, class_names, filter_classs_names=None,\n"," scores_thresh=0.1, save_dir=None, mode=0):\n"," \"\"\"\n"," image: image array\n"," image_name: image name\n"," boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.\n"," masks: [num_instances, height, width]\n"," class_ids: [num_instances]\n"," scores: confidence scores for each box\n"," class_names: list of class names of the dataset\n"," filter_classs_names: (optional) list of class names we want to draw\n"," scores_thresh: (optional) threshold of confidence scores\n"," save_dir: (optional) the path to store image\n"," mode: (optional) select the result which you want\n"," mode = 0 , save image with bbox,class_name,score and mask;\n"," mode = 1 , save image with bbox,class_name and score;\n"," mode = 2 , save image with class_name,score and mask;\n"," mode = 3 , save mask with black background;\n"," \"\"\"\n"," mode_list = [0, 1, 2, 3]\n"," assert mode in mode_list, \"mode's value should in mode_list %s\" % str(mode_list)\n","\n"," if save_dir is None:\n"," save_dir = os.path.join(os.getcwd(), \"output\")\n"," if not os.path.exists(save_dir):\n"," os.makedirs(save_dir)\n","\n"," useful_mask_indices = []\n","\n"," N = boxes.shape[0]\n"," if not N:\n"," print(\"\\n*** No instances in image %s to draw *** \\n\" % (image_name))\n"," return\n"," else:\n"," assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]\n","\n"," for i in range(N):\n"," # filter\n"," class_id = class_ids[i]\n"," score = scores[i] if scores is not None else None\n"," if score is None or score < scores_thresh:\n"," continue\n","\n"," label = class_names[class_id]\n"," if (filter_classs_names is not None) and (label not in filter_classs_names):\n"," continue\n","\n"," if not np.any(boxes[i]):\n"," # Skip this instance. Has no bbox. 
Likely lost in image cropping.\n"," continue\n","\n"," useful_mask_indices.append(i)\n","\n"," if len(useful_mask_indices) == 0:\n"," print(\"\\n*** No instances in image %s to draw *** \\n\" % (image_name))\n"," return\n","\n"," colors = visualize.random_colors(len(useful_mask_indices))\n","\n"," if mode != 3:\n"," masked_image = image.astype(np.uint8).copy()\n"," else:\n"," masked_image = np.zeros(image.shape).astype(np.uint8)\n","\n"," if mode != 1:\n"," for index, value in enumerate(useful_mask_indices):\n"," masked_image = visualize.apply_mask(masked_image, masks[:, :, value], colors[index])\n","\n"," masked_image = Image.fromarray(masked_image)\n","\n"," if mode == 3:\n"," masked_image.save(os.path.join(save_dir, '%s' % (image_name)))\n"," return\n","\n"," draw = ImageDraw.Draw(masked_image)\n"," colors = np.array(colors).astype(int) * 255\n","\n"," for index, value in enumerate(useful_mask_indices):\n"," class_id = class_ids[value]\n"," score = scores[value]\n"," label = class_names[class_id]\n","\n"," y1, x1, y2, x2 = boxes[value]\n"," if mode != 2:\n"," color = tuple(colors[index])\n"," draw.rectangle((x1, y1, x2, y2), outline=color)\n","\n"," # Label\n"," font = ImageFont.load_default()\n"," draw.text((x1, y1), \"%s %f\" % (label, score), (255, 255, 255), font)\n","\n"," masked_image.save(os.path.join(save_dir, '%s' % (image_name)))\n","\n","def pdf_export(config, trained = False, augmentation = False, pretrained_model = False):\n"," class MyFPDF(FPDF, HTMLMixin):\n"," pass\n","\n"," config_list = \"\"\n"," for a in dir(config):\n"," if not a.startswith(\"__\") and not callable(getattr(config, a)):\n"," config_list += \"{}: {}\\n\".format(a, getattr(config, a))\n"," \n"," pdf = MyFPDF()\n"," pdf.add_page()\n"," pdf.set_right_margin(-1)\n"," pdf.set_font(\"Arial\", size = 11, style='B') \n","\n"," Network = 'MaskRCNN'\n"," day = datetime.datetime.now()\n"," datetime_str = str(day)[0:10]\n","\n"," Header = 'Training report for '+Network+' model ('+model_name+'):\\nDate: '+datetime_str\n"," pdf.multi_cell(180, 5, txt = Header, align = 'L') \n","\n"," # add another cell\n"," if trained:\n"," training_time = \"Training time: \"+str(hour)+ \"hour(s) \"+str(mins)+\"min(s) \"+str(round(sec))+\"sec(s)\"\n"," pdf.cell(190, 5, txt = training_time, ln = 1, align='L')\n"," pdf.ln(1)\n","\n"," Header_2 = 'Information for your materials and methods:'\n"," pdf.cell(190, 5, txt=Header_2, ln=1, align='L')\n","\n"," all_packages = ''\n"," for requirement in freeze(local_only=True):\n"," all_packages = all_packages+requirement+', '\n"," #print(all_packages)\n","\n"," #Main Packages\n"," main_packages = ''\n"," version_numbers = []\n"," for name in ['tensorflow','numpy','Keras']:\n"," find_name=all_packages.find(name)\n"," main_packages = main_packages+all_packages[find_name:all_packages.find(',',find_name)]+', '\n"," #Version numbers only here:\n"," version_numbers.append(all_packages[find_name+len(name)+2:all_packages.find(',',find_name)])\n","\n"," try:\n"," cuda_version = subprocess.run([\"nvcc\",\"--version\"],stdout=subprocess.PIPE)\n"," cuda_version = cuda_version.stdout.decode('utf-8')\n"," cuda_version = cuda_version[cuda_version.find(', V')+3:-1]\n"," except:\n"," cuda_version = ' - No cuda found - '\n"," try:\n"," gpu_name = subprocess.run([\"nvidia-smi\"],stdout=subprocess.PIPE)\n"," gpu_name = gpu_name.stdout.decode('utf-8')\n"," gpu_name = gpu_name[gpu_name.find('Tesla'):gpu_name.find('Tesla')+10]\n"," except:\n"," gpu_name = ' - No GPU found - '\n"," 
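# Clarification: the two try/except blocks above only harvest version strings for the PDF
# report; if nvcc or nvidia-smi cannot be run, the placeholders ' - No cuda found - ' and
# ' - No GPU found - ' are used instead. Note that section 1.3 imports subprocess as sp,
# so unless `subprocess` is imported elsewhere, the bare subprocess.run() calls above raise
# NameError and fall back to those placeholders as well.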
#print(cuda_version[cuda_version.find(', V')+3:-1])\n"," #print(gpu_name)\n"," try:\n"," shape = io.imread(Training_source+'/Training/'+os.listdir(Training_source+'/Training')[0]).shape\n"," except:\n"," shape = io.imread(Training_source+'/Training/'+os.listdir(Training_source+'/Training')[0][:-4]).shape\n"," dataset_size = len(os.listdir(Training_source))/2\n","\n"," text = 'The '+Network+' model was trained using weights initialised on the coco dataset for '+str(number_of_epochs)+' epochs on '+str(dataset_size)+' labelled images (image dimensions: '+str(shape)+') with a batch size of '+str(config.BATCH_SIZE)+' and custom loss functions for region proposal and classification, using the '+Network+' ZeroCostDL4Mic notebook (v '+Notebook_version[0]+') (von Chamier & Laine et al., 2020). Key python packages used include tensorflow (v '+version_numbers[0]+'), Keras (v '+version_numbers[2]+'), numpy (v '+version_numbers[1]+'), cuda (v '+cuda_version+'). The training was accelerated using a '+gpu_name+'GPU.'\n","\n"," if pretrained_model:\n"," text = 'The '+Network+' model was trained for '+str(number_of_epochs)+' epochs on '+str(dataset_size)+' labelled images (image dimensions: '+str(shape)+') with a batch size of '+str(config.BATCH_SIZE)+' and custom loss functions for region proposal and classification, using the '+Network+' ZeroCostDL4Mic notebook (v '+Notebook_version[0]+') (von Chamier & Laine et al., 2020). The model was retrained from a previous model checkpoint (model: '+os.path.basename(pretrained_model_path)[:-8]+', checkpoint: '+str(int(pretrained_model_path[-7:-3]))+'). Key python packages used include tensorflow (v '+version_numbers[0]+'), Keras (v '+version_numbers[2]+'), numpy (v '+version_numbers[1]+'), cuda (v '+cuda_version+'). The training was accelerated using a '+gpu_name+'GPU.'\n","\n"," pdf.set_font('')\n"," pdf.set_font_size(10.)\n"," pdf.multi_cell(190, 5, txt = text, align='L')\n"," pdf.ln(1)\n"," pdf.set_font('')\n"," pdf.set_font('Arial', size = 10, style = 'B')\n"," pdf.ln(1)\n"," pdf.cell(28, 5, txt='Augmentation: ', ln=0)\n"," pdf.set_font('')\n"," if augmentation:\n"," aug_text = 'The dataset was augmented by vertical and horizontal flipping'\n"," # if multiply_dataset_by >= 2:\n"," # aug_text = aug_text+'\\n- flipping'\n"," # if multiply_dataset_by > 2:\n"," # aug_text = aug_text+'\\n- rotation'\n"," else:\n"," aug_text = 'No augmentation was used for training.'\n"," pdf.multi_cell(190, 5, txt=aug_text, align='L')\n"," pdf.ln(1)\n"," pdf.set_font('Arial', size = 11, style = 'B')\n"," pdf.ln(1)\n"," pdf.cell(180, 5, txt = 'Parameters', align='L', ln=1)\n"," pdf.set_font('')\n"," pdf.set_font_size(10.)\n"," # if Use_Default_Advanced_Parameters:\n"," # pdf.cell(200, 5, txt='Default Advanced Parameters were enabled')\n"," pdf.cell(200, 5, txt='The following parameters were used for training:')\n"," pdf.ln(4)\n"," pdf.multi_cell(200, 5, txt=config_list)\n"," pdf.ln(1)\n","\n"," pdf.set_font(\"Arial\", size = 11, style='B')\n"," pdf.ln(1)\n"," pdf.cell(190, 5, txt = 'Training Dataset', align='L', ln=1)\n"," pdf.set_font('')\n"," pdf.set_font('Arial', size = 10, style = 'B')\n"," pdf.cell(30, 5, txt= 'Training_source:', align = 'L', ln=0)\n"," pdf.set_font('')\n"," pdf.multi_cell(170, 5, txt = Training_source+'/Training', align = 'L')\n"," pdf.ln(1)\n"," pdf.set_font('')\n"," pdf.set_font('Arial', size = 10, style = 'B')\n"," pdf.cell(29, 5, txt= 'Validation:', align = 'L', ln=0)\n"," pdf.set_font('')\n"," pdf.multi_cell(170, 5, txt = Training_source+'/Validation', 
align = 'L')\n"," #pdf.cell(190, 5, txt=aug_text, align='L', ln=1)\n"," pdf.ln(1)\n"," pdf.set_font('')\n"," pdf.set_font('Arial', size = 10, style = 'B')\n"," pdf.cell(22, 5, txt= 'Model Path:', align = 'L', ln=0)\n"," pdf.set_font('')\n"," pdf.multi_cell(170, 5, txt = model_path+'/'+model_name, align = 'L')\n"," pdf.ln(1)\n"," pdf.cell(60, 5, txt = 'Example ground-truth annotation', ln=1)\n"," pdf.ln(1)\n"," exp_size = io.imread('/content/TrainingDataExample_MaskRCNN.png').shape\n"," pdf.image('/content/TrainingDataExample_MaskRCNN.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))\n"," pdf.ln(1)\n"," ref_1 = 'References:\\n - ZeroCostDL4Mic: von Chamier, Lucas & Laine, Romain, et al. \"Democratising deep learning for microscopy with ZeroCostDL4Mic.\" Nature Communications (2021).'\n"," pdf.multi_cell(190, 5, txt = ref_1, align='L')\n"," pdf.ln(1)\n"," ref_2 = '- MaskRCNN: Kaiming He, Georgia Gkioxari, Piotr Dollár, Ross Girshick. \"Mask R - CNN\" arxiv. 2018.'\n"," pdf.multi_cell(190, 5, txt = ref_2, align='L')\n"," pdf.ln(1)\n"," if augmentation:\n"," ref_3 = '- imgaug: Jung, Alexander et al., https://github.com/aleju/imgaug, (2020)'\n"," pdf.multi_cell(190, 5, txt = ref_3, align='L')\n"," pdf.ln(1)\n"," pdf.ln(3)\n"," reminder = 'Important:\\nRemember to perform the quality control step on all newly trained models\\nPlease consider depositing your training dataset on Zenodo'\n"," pdf.set_font('Arial', size = 11, style='B')\n"," pdf.multi_cell(190, 5, txt=reminder, align='C')\n"," pdf.ln(1)\n","\n"," pdf.output(os.path.dirname(model.log_dir)+'/'+model_name+'_training_report.pdf')\n","\n"," print('------------------------------')\n"," print('PDF report exported in '+model_path+'/'+model_name+'/')\n","\n","def qc_pdf_export():\n"," class MyFPDF(FPDF, HTMLMixin):\n"," pass\n","\n"," pdf = MyFPDF()\n"," pdf.add_page()\n"," pdf.set_right_margin(-1)\n"," pdf.set_font(\"Arial\", size = 11, style='B') \n","\n"," Network = 'MaskRCNN'\n","\n"," day = datetime.datetime.now()\n"," datetime_str = str(day)[0:16]\n","\n"," Header = 'Quality Control report for '+Network+' model ('+QC_model_name+', checkpoint:'+str(Checkpoint)+')\\nDate and Time: '+datetime_str\n"," pdf.multi_cell(180, 5, txt = Header, align = 'L') \n"," pdf.ln(1)\n","\n"," all_packages = ''\n"," for requirement in freeze(local_only=True):\n"," all_packages = all_packages+requirement+', '\n","\n"," pdf.set_font('')\n"," pdf.set_font('Arial', size = 11, style = 'B')\n"," pdf.ln(2)\n"," pdf.cell(190, 5, txt = 'Development of Training Losses', ln=1, align='L')\n"," pdf.ln(1)\n"," if os.path.exists(QC_model_folder+'/Quality Control/lossCurveAndmAPPlots.png'):\n"," exp_size = io.imread(QC_model_folder+'/Quality Control/lossCurveAndmAPPlots.png').shape\n"," pdf.image(QC_model_folder+'/Quality Control/lossCurveAndmAPPlots.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))\n"," else:\n"," pdf.set_font('')\n"," pdf.set_font('Arial', size=10)\n"," pdf.multi_cell(190, 5, txt='If you would like to see the evolution of the loss function during training please play the first cell of the QC section in the notebook.')\n"," pdf.ln(1)\n"," pdf.set_font('')\n"," pdf.set_font('Arial', size = 10, style = 'B')\n"," pdf.cell(80, 5, txt = 'P-R curves for test dataset', ln=1, align='L')\n"," pdf.ln(2)\n"," #for i in range(len(AP)):\n"," # os.path.exists(QC_model_folder+'/Quality Control/P-R_curve_'+config['model']['labels'][i]+'.png'):\n"," exp_size = io.imread(QC_model_folder+'/Quality 
Control/P-R_curve_'+QC_model_name+'.png').shape\n"," pdf.ln(1)\n"," pdf.image(QC_model_folder+'/Quality Control/P-R_curve_'+QC_model_name+'.png', x=16, y=None, w=round(exp_size[1]/4), h=round(exp_size[0]/4))\n"," # else:\n"," # pdf.cell(100, 5, txt='For the class '+config['model']['labels'][i]+' the model did not predict any objects.', ln=1, align='L')\n"," pdf.ln(3)\n"," pdf.set_font('')\n"," pdf.set_font('Arial', size = 11, style = 'B')\n"," pdf.ln(1)\n"," pdf.cell(180, 5, txt = 'Quality Control Metrics', align='L', ln=1)\n"," pdf.set_font('')\n"," pdf.set_font_size(10.)\n","\n"," pdf.ln(1)\n"," html = \"\"\"\n"," \n"," \n"," \"\"\"\n"," with open(QC_model_folder+'/Quality Control/QC_results.csv', 'r') as csvfile:\n"," metrics = csv.reader(csvfile)\n"," header = next(metrics)\n"," class_name = header[0]\n"," gt = header[1]\n"," tp = header[2]\n"," fn = header[3]\n"," iou = header[4]\n"," mAP = header[5]\n"," header = \"\"\"\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \"\"\".format(class_name,gt,tp,fn,iou,mAP)\n"," html = html+header\n"," i=0\n"," for row in metrics:\n"," i+=1\n"," class_name = row[0]\n"," gt = row[1]\n"," tp = row[2]\n"," fn = row[3]\n"," iou = row[4]\n"," mAP = row[5]\n"," cells = \"\"\"\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \"\"\".format(class_name,str(gt),str(tp),str(fn),str(iou),str(mAP))\n"," html = html+cells\n"," html = html+\"\"\"
{0}{1}{2}{3}{4}{5}
{0}{1}{2}{3}{4}{5}
\"\"\"\n","\n"," pdf.write_html(html)\n"," pdf.set_font('')\n"," pdf.set_font('Arial', size = 11, style = 'B')\n"," pdf.ln(3)\n"," pdf.cell(80, 5, txt = 'Example Quality Control Visualisation', ln=1)\n"," pdf.ln(3)\n"," exp_size = io.imread(QC_model_folder+'/Quality Control/QC_example_data.png').shape\n"," pdf.image(QC_model_folder+'/Quality Control/QC_example_data.png', x = 16, y = None, w = round(exp_size[1]/10), h = round(exp_size[0]/10))\n","\n"," pdf.set_font('')\n"," pdf.set_font_size(10.)\n"," pdf.ln(3)\n"," ref_1 = 'References:\\n - ZeroCostDL4Mic: von Chamier, Lucas & Laine, Romain, et al. \"Democratising deep learning for microscopy with ZeroCostDL4Mic.\" Nature Communications (2021).'\n"," pdf.multi_cell(190, 5, txt = ref_1, align='L')\n"," pdf.ln(1)\n"," ref_2 = '- MaskRCNN: Kaiming He, Georgia Gkioxari, Piotr Dollár, Ross Girshick. \"Mask R - CNN\" arxiv. 2018.'\n"," pdf.multi_cell(190, 5, txt = ref_2, align='L')\n"," pdf.ln(1)\n","\n"," pdf.ln(3)\n"," reminder = 'To find the parameters and other information about how this model was trained, go to the training_report.pdf of this model which should be in the folder of the same name.'\n","\n"," pdf.set_font('Arial', size = 11, style='B')\n"," pdf.multi_cell(190, 5, txt=reminder, align='C')\n"," pdf.ln(1)\n","\n"," pdf.output(QC_model_folder+'/Quality Control/'+QC_model_name+'_QC_report.pdf')\n","\n","\n"," print('------------------------------')\n"," print('PDF report exported in '+QC_model_folder+'/Quality Control/')\n","\n","\n","# Check if this is the latest version of the notebook\n","All_notebook_versions = pd.read_csv(\"https://raw.githubusercontent.com/HenriquesLab/ZeroCostDL4Mic/master/Colab_notebooks/Latest_Notebook_versions.csv\", dtype=str)\n","print('Notebook version: '+Notebook_version)\n","Latest_Notebook_version = All_notebook_versions[All_notebook_versions[\"Notebook\"] == Network]['Version'].iloc[0]\n","print('Latest notebook version: '+Latest_Notebook_version)\n","if Notebook_version == Latest_Notebook_version:\n"," print(\"This notebook is up-to-date.\")\n","else:\n"," print(bcolors.WARNING +\"A new version of this notebook has been released. We recommend that you download it at https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki\")\n","\n","\n","# Build requirements file for local run\n","after = [str(m) for m in sys.modules]\n","build_requirements_file(before, after)"]},{"cell_type":"markdown","metadata":{"id":"s7_nokQv7M4-"},"source":["# **2. Initialise the Colab session**\n","\n","\n","\n","\n","---\n","\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"5-hsYVdkjKuI"},"source":["\n","## **2.1. Check for GPU access**\n","---\n","\n","By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:\n","\n","Go to **Runtime -> Change the Runtime type**\n","\n","**Runtime type: Python 3** *(Python 3 is programming language in which this program is written)*\n","\n","**Accelator: GPU** *(Graphics processing unit)*\n"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"-goWypUVEvnp"},"outputs":[],"source":["#@markdown ##Run this cell to check if you have GPU access\n","%tensorflow_version 1.x\n","import tensorflow as tf\n","if tf.test.gpu_device_name()=='':\n"," print('You do not have GPU access.') \n"," print('Did you change your runtime ?') \n"," print('If the runtime setting is correct then Google did not allocate a GPU for your session')\n"," print('Expect slow performance. 
To access GPU try reconnecting later')\n","\n","else:\n"," print('You have GPU access')\n"," !nvidia-smi"]},{"cell_type":"markdown","metadata":{"id":"L_pjmwONjTvb"},"source":["## **2.2. Mount your Google Drive**\n","---\n"," To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook.\n","\n"," Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste into the cell and press enter. This will give Colab access to the data on the drive. \n","\n"," Once this is done, your data are available in the **Files** tab on the top left of notebook."]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"QK-DDu1ljVna"},"outputs":[],"source":["#@markdown ##Run this cell to connect your Google Drive to Colab\n","\n","#@markdown * Click on the URL. \n","\n","#@markdown * Sign in your Google Account. \n","\n","#@markdown * Copy the authorization code. \n","\n","#@markdown * Enter the authorization code. \n","\n","#@markdown * Click on \"Files\" site on the right. Refresh the site. Your Google Drive folder should now be available here as \"drive\". \n","\n","#mounts user's Google Drive to Google Colab.\n","\n","from google.colab import drive\n","drive.mount('/content/gdrive')"]},{"cell_type":"markdown","metadata":{"id":"P-YFjdLR-5hv"},"source":["** If you cannot see your files, reactivate your session by connecting to your hosted runtime.** \n","\n","\n","\"Example
Connect to a hosted runtime.
"]},{"cell_type":"markdown","metadata":{"id":"Do_LZbDmpJiZ"},"source":["# **3. Select your paths and parameters**\n","\n","---\n","\n","The code below allows the user to enter the paths to where the training data is and to define the training parameters.\n","\n","If your dataset is large, this step can take a while. \n","\n","**Note:** The BG class reported by MaskRCNN stands for 'background'. By default BG is the default class in MaskRCNN, so even if your dataset contains only one class, MaskRCNN will treat the dataset as a two-class set.\n"]},{"cell_type":"markdown","metadata":{"id":"M5QFEW-HpRdQ"},"source":["## **3.1. Setting the main training parameters**\n","---\n",""]},{"cell_type":"markdown","metadata":{"id":"vdLRX63upWcB"},"source":[" **Paths for training, predictions and results**\n","\n","**`Training_source:`:** This is the path to your folder containing the subfolders *Training* and *Validation*, each containing images with their respective annotations. **If your files are not organised in this way, the notebook will NOT work. So make sure everything looks right!** To find the paths of the folders containing the respective datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below.\n","\n","**`model_name`:** Use only my_model -style, not my-model (Use \"_\" not \"-\"). Do not use spaces in the name. Avoid using the name of an existing model (saved in the same folder) as it will be overwritten. **Note that MaskRCNN will add a timestamp to your model_name in the form: model_name*YearMonthDayTHourMinute***\n","\n","**`model_path`**: Enter the path where your model will be saved once trained (for instance your result folder).\n","\n","**Training parameters**\n","\n","**`Training Depth`:** Here, you can choose how much you want to train the network. MaskRCNN is already pretrained on a large dataset which means its weights are already initialised. This means it may not be necessary to train the full model to reach satisfactory results on your dataset. To get the most out of the model, we recommend training the headlayers first for ca. 30 epochs, and then retraining the same model with an increasing depth for further 10s of epochs. To do this, use the same model_name in this section, with any other needed parameters and then load the desired weights file in section 3.3. **Default value: Head layers only**\n","\n","**`number_of_epochs`:**Enter the number of epochs the networks will be trained for. Note that if you want to continue training a previously trained model, enter the final number of epochs you want to use, i.e. if your previous model was trained for 50 epochs and you want to train it to 80, enter 80 epochs here, not 30.\n","**Default value: 50**\n","\n","**`detection_confidence`:** The network will assign scores of confidence to any predictions of ROIs it makes on the dataset during training. The detection confidence here indicates what threshold score you want to apply for the network to use accept any predicted ROIs. We recommend starting low here. If you notice your network is giving you too many ROIs, then increase this value gradually. **Default value: 0**\n","\n","**`learning_rate:`** Input the initial value to be used as learning rate. The learning rate will decrease after 7 epochs if the validation loss does not improve. 
**Default value: 0.003**\n","\n","\n"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"kajoWCX8ps4O"},"outputs":[],"source":["#@markdown ###Path to training images:\n","\n","Training_source = \"\" #@param {type:\"string\"}\n","\n","# Ground truth images\n","#Training_validation = \"\" #@param {type:\"string\"}\n","\n","# model name and path\n","##@markdown ###Name of the model and path to model folder:\n","model_name = \"\" #@param {type:\"string\"}\n","model_path = \"\" #@param {type:\"string\"}\n","\n","full_model_path = os.path.join(model_path,model_name)\n","# if os.path.exists(full_model_path):\n","# print(bcolors.WARNING+'Model folder already exists and will be overwritten.'+bcolors.NORMAL)\n","\n","# other parameters for training.\n","#@markdown ###Training Parameters\n","\n","Training_depth = \"3+ resnet layers\" #@param [\"Head_layers_only\", \"3+ resnet layers\", \"4+ resnet layers\", \"5+ resnet layers\", \"all layers\"]\n","##@markdown ###Advanced Parameters\n","\n","#Use_Default_Advanced_Parameters = True #@param {type:\"boolean\"}\n","##@markdown ###If not, please input:\n","\n","number_of_epochs = 10#@param {type:\"integer\"}\n","\n","batch_size = 4#@param{type:\"integer\"}\n","\n","image_resize_mode = \"none\"\n","\n","detection_confidence = 0 #@param {type:\"number\"}\n","\n","region_proposal_nms_threshold = 0.9 #@param{type:\"number\"}\n","\n","learning_rate = 0.003 #@param {type:\"number\"}\n","\n","#@markdown ###Loss weights\n","\n","region_proposal_class_loss = 1#@param {type:\"number\"}\n","region_proposal_class_loss = float(region_proposal_class_loss)\n","\n","region_proposal_bbox_loss = 1#@param {type:\"number\"}\n","region_proposal_bbox_loss = float(region_proposal_bbox_loss)\n","\n","mrcnn_class_loss = 1#@param {type:\"number\"}\n","mrcnn_class_loss = float(mrcnn_class_loss)\n","\n","mrcnn_bbox_loss = 1#@param {type:\"number\"}\n","mrcnn_bbox_loss = float(mrcnn_bbox_loss)\n","\n","mrcnn_mask_loss = 1#@param {type:\"number\"}\n","mrcnn_mask_loss = float(mrcnn_mask_loss)\n","\n","# Path to trained weights file\n","COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, \"mask_rcnn_coco.h5\")\n","\n","# Directory to save logs and model checkpoints, if not provided\n","# through the command line argument --logs\n","DEFAULT_LOGS_DIR = model_path\n","\n","dataset_train = ClassDataset()\n","dataset_train.load_image_csv(Training_source, \"Training\")\n","dataset_train.prepare()\n","\n","print(\"Class Count: {}\".format(dataset_train.num_classes))\n","for i, info in enumerate(dataset_train.class_info):\n"," print(\"{:3}. 
{:50}\".format(i, info['name']))\n","\n","############################################################\n","# Configurations\n","############################################################\n","\n","\n","class ClassConfig(Config):\n"," \"\"\"Configuration for training on the toy dataset.\n"," Derives from the base Config class and overrides some values.\n"," \"\"\"\n"," # Give the configuration a recognizable name\n"," NAME = model_name\n","\n"," # We use a GPU with 12GB memory, which can fit two images.\n"," # Adjust down if you use a smaller GPU.\n"," IMAGES_PER_GPU = batch_size\n","\n"," # Number of classes (including background)\n"," NUM_CLASSES = len(dataset_train.class_names) # Background + nucleus\n","\n"," # Number of training steps per epoch\n"," STEPS_PER_EPOCH = (len(os.listdir(Training_source+\"/Training\"))/2) // IMAGES_PER_GPU\n"," VALIDATION_STEPS = (len(os.listdir(Training_source+\"/Validation\"))/2) // IMAGES_PER_GPU\n","\n"," # Skip detections with < 90% confidence\n"," # DETECTION_MIN_CONFIDENCE = detection_confidence\n","\n"," LEARNING_RATE = learning_rate\n","\n"," DETECTION_MIN_CONFIDENCE = 0\n","\n"," # Backbone network architecture\n"," # Supported values are: resnet50, resnet101\n"," BACKBONE = \"resnet101\"\n","\n"," # Input image resizing\n"," # Random crops of size 64x64\n"," IMAGE_RESIZE_MODE = image_resize_mode #\"crop\"\n"," IMAGE_MIN_DIM = 128\n"," IMAGE_MAX_DIM = 128\n"," IMAGE_MIN_SCALE = 2.0\n","\n"," # Length of square anchor side in pixels\n"," RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)\n","\n"," # ROIs kept after non-maximum supression (training and inference)\n"," POST_NMS_ROIS_TRAINING = 2000\n"," POST_NMS_ROIS_INFERENCE = 4000\n","\n"," # Non-max suppression threshold to filter RPN proposals.\n"," # You can increase this during training to generate more propsals.\n"," RPN_NMS_THRESHOLD = region_proposal_nms_threshold\n","\n"," # How many anchors per image to use for RPN training\n"," RPN_TRAIN_ANCHORS_PER_IMAGE = 128\n","\n"," # Image mean (RGB)\n"," MEAN_PIXEL = np.array([43.53, 39.56, 48.22])\n","\n"," # If enabled, resizes instance masks to a smaller size to reduce\n"," # memory load. Recommended when using high-resolution images.\n"," USE_MINI_MASK = False\n"," MINI_MASK_SHAPE = (56, 56) # (height, width) of the mini-mask\n","\n"," # Number of ROIs per image to feed to classifier/mask heads\n"," # The Mask RCNN paper uses 512 but often the RPN doesn't generate\n"," # enough positive proposals to fill this and keep a positive:negative\n"," # ratio of 1:3. 
You can increase the number of proposals by adjusting\n"," # the RPN NMS threshold.\n"," TRAIN_ROIS_PER_IMAGE = 128\n","\n"," # Maximum number of ground truth instances to use in one image\n"," MAX_GT_INSTANCES = 100\n","\n"," # Max number of final detections per image\n"," DETECTION_MAX_INSTANCES = 200\n","\n"," LOSS_WEIGHTS = {\n"," \"rpn_class_loss\": region_proposal_class_loss,\n"," \"rpn_bbox_loss\": region_proposal_bbox_loss,\n"," \"mrcnn_class_loss\": mrcnn_class_loss,\n"," \"mrcnn_bbox_loss\": mrcnn_bbox_loss,\n"," \"mrcnn_mask_loss\": mrcnn_mask_loss\n"," }\n","\n","if Training_depth == \"Head_layers_only\":\n"," layers = \"heads\"\n","elif Training_depth == \"3+ resnet layers\":\n"," layers = \"3+\"\n","elif Training_depth == \"4+ resnet layers\":\n"," layers = \"4+\"\n","elif Training_depth == \"5+ resnet layers\":\n"," layers = \"5+\"\n","else:\n"," layers = \"all\"\n","\n","config = ClassConfig()\n","# Training dataset\n","# dataset_train = ClassDataset()\n","# num_classes = dataset_train.load_image_csv(Training_source, \"Training\")\n","# dataset_train.prepare()\n","# print(\"Class Count: {}\".format(dataset_train.num_classes))\n","# for i, info in enumerate(dataset_train.class_info):\n","# print(\"{:3}. {:50}\".format(i, info['name']))\n","\n","# Load and display random samples\n","image_ids = np.random.choice(dataset_train.image_ids, 1)\n","for image_id in image_ids:\n"," image = dataset_train.load_image(image_id)\n"," mask, class_ids = dataset_train.load_mask(image_id)\n"," visualize.display_top_masks(image, mask, class_ids, dataset_train.class_names, limit=dataset_train.num_classes-1)\n","\n","# plt.savefig('/content/TrainingDataExample_MaskRCNN.png',bbox_inches='tight',pad_inches=0)\n","\n","# image, image_meta, class_ids, bbox, mask = modellib.load_image_gt(\n","# dataset_train, config, image_id, use_mini_mask=False)\n","\n","# visualize.display_instances(image, bbox, mask, class_ids, dataset_train.class_names,\n","# show_bbox=False)\n","model = modellib.MaskRCNN(mode=\"training\", config=config, model_dir=DEFAULT_LOGS_DIR)\n","config.display()\n","Use_pretrained_model = False\n","Use_Data_augmentation = False"]},{"cell_type":"markdown","metadata":{"id":"PzWJwWFGlYZi"},"source":["##**3.2. Data augmentation**\n","\n","---\n","\n"," Data augmentation can improve training progress by amplifying differences in the dataset. This can be useful if the available dataset is small since, in this case, it is possible that a network could quickly learn every example in the dataset (overfitting), without augmentation. Augmentation is not necessary for training and if the dataset the `Use_Data_Augmentation` box can be unticked.\n","\n"," If the box is ticked a simple augmentation of horizontal and vertical flipping will be applied to the dataset."]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"d0BwRHRElaSD"},"outputs":[],"source":["#@markdown ##**Augmentation Options**\n","\n","Use_Data_augmentation = True #@param {type:\"boolean\"}\n","\n","if Use_Data_augmentation == True:\n"," # Number of training steps per epoch\n"," class AugClassConfig(ClassConfig):\n"," STEPS_PER_EPOCH = 10*((len(os.listdir(Training_source+\"/Training\"))/2) // batch_size)\n"," VALIDATION_STEPS = 10*((len(os.listdir(Training_source+\"/Validation\"))/2) // batch_size)\n"," \n","if Use_Data_augmentation:\n"," config = AugClassConfig()"]},{"cell_type":"markdown","metadata":{"id":"uJjmzKGHk_p9"},"source":["\n","## **3.3. 
Using weights from a pre-trained model as initial weights**\n","---\n"," Here, you can set the the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a MaskRCNN model**. \n","\n"," This option allows you to perform training over multiple Colab runtimes or to do transfer learning using models trained outside of ZeroCostDL4Mic. **You do not need to run this section if you want to train a network from scratch**."]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"3JsrRmNbgNeL"},"outputs":[],"source":["# @markdown ##Loading weights from a pre-trained network\n","\n","Use_pretrained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If yes, please provide the path to the model (this path should end with the file extension .h5):\n","pretrained_model_path = \"\" #@param {type:\"string\"}\n","\n","if Use_Data_augmentation == True:\n"," config = AugClassConfig()\n","else:\n"," config = ClassConfig()\n","\n","model = modellib.MaskRCNN(mode=\"training\", config=config, model_dir=DEFAULT_LOGS_DIR)\n","model.load_weights(pretrained_model_path, by_name=True)"]},{"cell_type":"markdown","metadata":{"id":"rTWfoQEPuPad"},"source":["#**4. Train the network**\n","---"]},{"cell_type":"markdown","metadata":{"id":"CRPOHMNSo0Sj"},"source":["## **4.1. Train the network**\n","---\n","When playing the cell below you should see updates after each epoch (round). Network training can take some time.\n","\n","* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches."]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"Li__jcfsTzs6"},"outputs":[],"source":["#@markdown ##Start training\n","\n","pdf_export(config, augmentation = Use_Data_augmentation, pretrained_model=Use_pretrained_model)\n","\n","if os.path.exists(model.log_dir+\"/Quality Control\"):\n"," shutil.rmtree(model.log_dir+\"/Quality Control\")\n","os.makedirs(model.log_dir+\"/Quality Control\")\n","\n","start = time.time()\n","#Here, we start the model training\n","train_csv(model, Training_source, augmentation=Use_Data_augmentation, epochs = number_of_epochs, layers = layers)\n","dt = time.time() - start\n","mins, sec = divmod(dt, 60) \n","hour, mins = divmod(mins, 60) \n","print(\"Time elapsed:\",hour, \"hour(s)\",mins,\"min(s)\",round(sec),\"sec(s)\")\n","\n","new_model_name = os.path.basename(model.log_dir)\n","#Here, we just save some interesting parameters from training as a csv file\n","if not os.path.exists(model_path+'/'+new_model_name+'/Quality Control/class_names.csv'):\n"," with open(model_path+'/'+new_model_name+'/Quality Control/class_names.csv','w') as class_count_csv:\n"," class_writer = csv.writer(class_count_csv)\n"," for class_name in dataset_train.class_names:\n"," class_writer.writerow([class_name])\n","\n","if os.path.exists(model_path+'/'+new_model_name+'/Quality Control/training_evaluation.csv'):\n"," with open(model_path+'/'+new_model_name+'/Quality Control/training_evaluation.csv','a') as csvfile:\n"," writer = csv.writer(csvfile)\n"," #print('hello')\n"," #writer.writerow(['epoch','loss','val_loss','learning rate'])\n"," model_starting_checkpoint = int(pretrained_model_path[-7:-3])\n"," for i in range(len(model.keras_model.history.history['loss'])):\n"," 
writer.writerow([str(model_starting_checkpoint+i),model.keras_model.history.history['loss'][i], str(learning_rate)])\n","else:\n"," with open(model_path+'/'+new_model_name+'/Quality Control/training_evaluation.csv','w') as csvfile:\n"," writer = csv.writer(csvfile)\n"," writer.writerow(['epoch','loss','val_loss','learning rate'])\n"," for i in range(len(model.keras_model.history.history['loss'])):\n"," writer.writerow([str(i+1),model.keras_model.history.history['loss'][i], model.keras_model.history.history['val_loss'][i], str(learning_rate)])\n","\n","pdf_export(config, trained=True, augmentation = Use_Data_augmentation, pretrained_model = Use_pretrained_model)"]},{"cell_type":"markdown","metadata":{"id":"n0-RUNbruHa6"},"source":["# **5. Evaluate your model**\n","---\n","\n","This section allows the user to perform important quality checks on the validity and generalisability of the trained model. \n","\n","**We highly recommend to perform quality control on all newly trained models.**\n","\n"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"b10mT10YtngQ"},"outputs":[],"source":["#@markdown ###Do you want to assess the model you just trained ?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the name of the model folder:\n","\n","QC_model_folder = \"\" #@param {type:\"string\"}\n","\n","if (Use_the_current_trained_model): \n"," QC_model_folder = model_path+'/'+new_model_name\n","\n","QC_model_name = os.path.basename(QC_model_folder)\n","\n","if os.path.exists(QC_model_folder):\n"," print(\"The \"+QC_model_name+\" model will be evaluated\")\n","else:\n"," W = '\\033[0m' # white (normal)\n"," R = '\\033[31m' # red\n"," print(R+'!! WARNING: The chosen model does not exist !!'+W)\n"," print('Please make sure you provide a valid model path before proceeding further.')"]},{"cell_type":"markdown","metadata":{"id":"xOOXTMHkLqYq"},"source":["## **5.1. Inspection of the loss function**\n","---\n","\n","First, it is good practice to evaluate the training progress by comparing the training loss with the validation loss. The latter is a metric which shows how well the network performs on a subset of unseen data which is set aside from the training dataset. For more information on this, see for example [this review](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6381354/) by Nichols *et al.*\n","\n","**Training loss** describes an error value after each epoch for the difference between the model's prediction and its ground-truth target.\n","\n","**Validation loss** describes the same error value between the model's prediction on a validation image and compared to it's target.\n","\n","During training both values should decrease before reaching a minimal value which does not decrease further even after more training. Comparing the development of the validation loss with the training loss can give insights into the model's performance.\n","\n","Decreasing **Training loss** and **Validation loss** indicates that training is still necessary and increasing the `number_of_epochs` is recommended. Note that the curves can look flat towards the right side, just because of the y-axis scaling. The network has reached convergence once the curves flatten out. After this point no further training is required. If the **Validation loss** suddenly increases again an the **Training loss** simultaneously goes towards zero, it means that the network is overfitting to the training data. 
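The loss values reported during training are also written to `Quality Control/training_evaluation.csv` by the training cell in section 4.1, so they can be inspected directly; a minimal sketch, with a hypothetical model path:

```python
import pandas as pd
import matplotlib.pyplot as plt

# Sketch: plot the losses logged to Quality Control/training_evaluation.csv during training.
# The path below is a hypothetical example - point it to your own model folder.
csv_path = '/content/gdrive/MyDrive/my_model20210202T1206/Quality Control/training_evaluation.csv'
df = pd.read_csv(csv_path)
plt.plot(df['epoch'], df['loss'], label='training loss')
if 'val_loss' in df.columns:             # guard in case the column is absent from the log
    plt.plot(df['epoch'], df['val_loss'], label='validation loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.show()
```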
In other words the network is remembering the exact patterns from the training data and no longer generalizes well to unseen data. In this case the training dataset has to be increased.\n","\n","In this notebook, the training loss curves are plotted using **tensorboard**. However, all the training results are also logged in a csv file in your model folder."]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"-BpIBHDiOTqK"},"outputs":[],"source":["#@markdown ##Play the cell to show a plot of training errors vs. epoch number\n","\n","if os.path.exists(QC_model_folder):\n"," os.chdir(QC_model_folder)\n"," %load_ext tensorboard\n"," %tensorboard --logdir \"$QC_model_folder\"\n","else:\n"," print(\"The chosen model or path does not exist. Check if your model_name was saved with a timestamp.\")"]},{"cell_type":"markdown","metadata":{"id":"PdJFjEXRKApD"},"source":["## **5.2. Error mapping and quality metrics estimation**\n","---\n","\n","This section will display an overlay of the input images ground-truth (solid lines) and predicted boxes (dashed lines). Additionally, the below cell will show the mAP value of the model on the QC data together with plots of the Precision-Recall curves for all the classes in the dataset. If you want to read in more detail about these scores, we recommend [this brief explanation](https://medium.com/@jonathan_hui/map-mean-average-precision-for-object-detection-45c121a31173).\n","\n"," In a nutshell:\n","\n","**Precision:** This is the proportion of the correct classifications (true positives) in all the predictions made by the model.\n","\n","**Recall:** This is the proportion of the detected true positives in all the detectable data.\n","\n"," The files provided in the \"QC_data_folder\" should be under a subfolder called validation which contains the images (e.g. as .jpg) and annotations (.csv files)!"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"8yhm7a3gAFdK"},"outputs":[],"source":["#@markdown ### Provide the path to your quality control dataset.\n","DEFAULT_LOGS_DIR = \"/content/gdrive/MyDrive\"\n","QC_data_folder = \"\" #@param {type:\"string\"}\n","#Result_folder = \"\" #@param {type:\"string\"}\n","\n","# model name and path\n","\n","#Use_the_current_trained_model = False #@param {type:\"boolean\"}\n","\n","#@markdown #####During training, the model files are automatically saved inside a folder named after model_name in section 3. 
Provide the path to this folder below.\n","#QC_model_folder = \"/content/gdrive/MyDrive/maskrcnn_nucleus20210202T1206\" #@param {type:\"string\"}\n","\n","#@markdown ###Choose the checkpoint you want to evauluate:\n","Checkpoint = 8#@param {type:\"integer\"}\n","\n","#Load the dataset\n","dataset_val = ClassDataset()\n","dataset_val.load_image_csv(QC_data_folder, \"Validation\")\n","dataset_val.prepare()\n","\n","# Activate the (pre-)trained model\n","\n","detection_min_confidence = 0.35 #@param{type:\"number\"}\n","region_proposal_nms_threshold = 0.99 #@param{type:\"number\"}\n","resize_mode = \"none\" #@param[\"none\",\"square\",\"crop\",\"pad64\"]\n","\n","class InferenceConfig(ClassConfig):\n"," IMAGE_RESIZE_MODE = resize_mode\n"," RPN_NMS_THRESHOLD = region_proposal_nms_threshold\n"," NAME = \"nucleus\"\n"," IMAGES_PER_GPU = 1\n"," # Number of classes (including background)\n"," DETECTION_MIN_CONFIDENCE = detection_min_confidence\n"," NUM_CLASSES = len(dataset_val.class_names) # Background + nucleus\n"," RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)\n"," POST_NMS_ROIS_INFERENCE = 15000\n","inference_config = InferenceConfig()\n","\n","# Recreate the model in inference mode\n","#if Use_the_current_trained_model:\n","model = modellib.MaskRCNN(mode=\"inference\", \n"," config=inference_config,\n"," model_dir=QC_model_folder)\n","# else:\n","# model = modellib.MaskRCNN(mode=\"inference\", \n","# config=inference_config,\n","# model_dir=QC_model_folder)\n","\n","# Get path to saved weights\n","if Checkpoint < 10:\n"," qc_model_path = QC_model_folder+\"/mask_rcnn_\"+QC_model_name[:-13]+\"_000\"+str(Checkpoint)+\".h5\"\n","elif Checkpoint < 100:\n"," qc_model_path = QC_model_folder+\"/mask_rcnn_\"+QC_model_name[:-13]+\"_00\"+str(Checkpoint)+\".h5\"\n","elif Checkpoint < 1000:\n"," qc_model_path = QC_model_folder+\"/mask_rcnn_\"+QC_model_name[:-13]+\"_0\"+str(Checkpoint)+\".h5\"\n","\n","# Load trained weights\n","print(\"Loading weights from \", qc_model_path)\n","model.load_weights(qc_model_path, by_name=True)\n","\n","# dataset_val = ClassDataset()\n","# num_classes = dataset_val.load_image_csv(QC_data_folder, \"Validation\")\n","# dataset_val.prepare()\n","\n","image_id = random.choice(dataset_val.image_ids)\n","original_image, image_meta, gt_class_id, gt_bbox, gt_mask =\\\n"," modellib.load_image_gt(dataset_val, inference_config, \n"," image_id, use_mini_mask=False)\n","\n","results = model.detect([original_image], verbose=1)\n","r = results[0]\n","visualize.display_differences(original_image, gt_bbox, gt_class_id, gt_mask, r['rois'], r['class_ids'], r['scores'], r['masks'], dataset_val.class_names, iou_threshold = 0.8, score_threshold= 0.8)\n","# visualize.display_instances(original_image, gt_bbox, gt_mask, gt_class_id, \n","# dataset_val.class_names, figsize=(8, 8))\n","\n","save_image(original_image, \"QC_example_data.png\", r['rois'], r['masks'],\n"," r['class_ids'],r['scores'],dataset_val.class_names,\n"," scores_thresh=0,mode=0,save_dir=QC_model_folder+'/Quality Control')"]},{"cell_type":"markdown","metadata":{"id":"IuXEDjWAK6pO"},"source":["##**5.3. 
Precision-Recall Curve**\n","\n"," The p-r curve can give a quantification how well the model\n","Since the training saves model checkpoints for each epoch, you should choose which one you want to use for quality control in the `Checkpoint` box."]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"lzoGZUoCxpSc"},"outputs":[],"source":["#@markdown ###Show the precision-recall curve of the QC data\n","#@markdown Choose an IoU threshold for the p-r plot (between 0 and 1), ignore that the plot title says AP@50:\n","\n","iou_threshold = 0.3 #@param{type:\"number\"}\n","mAP, precisions, recalls, overlaps = utils.compute_ap(gt_bbox, gt_class_id, gt_mask,\n"," r['rois'], r['class_ids'], r['scores'], r['masks'],\n"," iou_threshold=iou_threshold)\n","visualize.plot_precision_recall(mAP, precisions, recalls)\n","plt.savefig(QC_model_folder+'/Quality Control/P-R_curve_'+QC_model_name+'.png',bbox_inches='tight',pad_inches=0)\n","\n","gt_match, pred_match, overlaps = utils.compute_matches(gt_bbox, gt_class_id, gt_mask, r['rois'], r['class_ids'], r['scores'], r['masks'])\n","\n","#TO DO: Implement for multiclasses\n","if len(dataset_val.class_names) == 2:\n"," with open (QC_model_folder+'/Quality Control/QC_results.csv','w') as csvfile:\n"," writer = csv.writer(csvfile)\n"," writer.writerow(['class','gt instances','True positives','False Negatives', 'IoU threshold', 'mAP'])\n"," for index in dataset_val.class_names:\n"," if index != 'BG':\n"," writer.writerow([index, str(len(gt_match)), str(len(pred_match)), str(len(gt_match)-len(pred_match)), str(iou_threshold), str(mAP)])\n"," qc_pdf_export()\n","else:\n"," print('Your dataset has more than one class. This means certain features may not be enabled. We are working on implementing this section fully for multiple classes.')"]},{"cell_type":"markdown","metadata":{"id":"MGBi1lB2vSOr"},"source":["# **6. Using the trained model**\n","\n","---\n","\n","In this section the unseen data is processed using the trained model (in section 4). First, your unseen images are uploaded and prepared for prediction. After that your trained model from section 4 is activated and finally saved into your Google Drive."]},{"cell_type":"markdown","metadata":{"id":"HrQPXU0DvWIT"},"source":["## **6.1. Generate prediction(s) from unseen dataset**\n","---\n","\n","The current trained model (from section 4.2) can now be used to process images. If you want to use an older model, untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. 
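The `Checkpoint` number used in sections 5 and 6 selects one of the per-epoch weights files saved inside the model folder; a short sketch of the naming scheme those cells reconstruct (names are hypothetical):

```python
# Sketch (hypothetical names): how the Checkpoint number maps to a weights file.
# One checkpoint is saved per epoch as mask_rcnn_<name>_0001.h5, _0002.h5, ...
model_folder_name = 'my_model20210202T1206'   # model_name plus the timestamp added at training time
checkpoint = 8
weights_file = 'mask_rcnn_{}_{}.h5'.format(model_folder_name[:-13], str(checkpoint).zfill(4))
print(weights_file)                           # mask_rcnn_my_model_0008.h5
```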
Predicted output images are saved in your **Result_folder** folder as restored image stacks (ImageJ-compatible TIFF images).\n","\n","**`Data_folder`:** This folder should contain the images that you want to use your trained network on for processing.\n","\n","**`Result_folder`:** This folder will contain the predicted output images."]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"7FttSetXvdTB"},"outputs":[],"source":["#@markdown ### Provide the path to your dataset and to the folder where the predictions are saved, then play the cell to predict outputs from your unseen images.\n","DEFAULT_LOGS_DIR = \"/content/gdrive/MyDrive\"\n","Data_folder = \"\" #@param {type:\"string\"}\n","Result_folder = \"\" #@param {type:\"string\"}\n","\n","# model name and path\n","#@markdown ###Do you want to use the current trained model?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, provide the name of the model and path to model folder:\n","Prediction_model_folder = \"\" #@param {type:\"string\"}\n","\n","if Use_the_current_trained_model:\n"," Prediction_model_folder = model_path+'/'+new_model_name\n","\n","#@markdown ###Choose the checkpoint you want to evaluate:\n","Checkpoint = 8#@param {type:\"integer\"}\n","\n","if os.path.exists(Prediction_model_folder+'/Quality Control/class_names.csv'):\n"," print('Prediction classes detected! The model will predict the following classes:')\n"," class_names = []\n"," with open(Prediction_model_folder+'/Quality Control/class_names.csv', 'r') as class_names_csv:\n"," csvreader = csv.reader(class_names_csv)\n"," for row in csvreader:\n"," print(row[0])\n"," class_names.append(row[0])\n","\n","\n","detection_min_confidence = 0.1 #@param{type:\"number\"}\n","region_proposal_nms_threshold = 0.99 #@param{type:\"number\"}\n","resize_mode = \"none\" #@param[\"none\",\"square\",\"crop\",\"pad64\"]\n","post_nms_rois = 10000 #@param{type:\"integer\"}\n","\n","\n","#Load the dataset\n","dataset_val = ClassDataset()\n","dataset_val.load_image_csv(Data_folder, \"Validation\")\n","dataset_val.prepare()\n","\n"," # Activate the (pre-)trained model\n","class InferenceConfig(ClassConfig):\n"," IMAGE_RESIZE_MODE = resize_mode\n"," IMAGE_MIN_DIM = 128\n"," IMAGE_MAX_DIM = 128\n"," IMAGE_MIN_SCALE = 2.0\n"," RPN_NMS_THRESHOLD = region_proposal_nms_threshold\n"," #DETECTION_NMS_THRESHOLD = 0.0\n"," NAME = \"nucleus\"\n"," IMAGES_PER_GPU = 1\n"," # Number of classes (including background)\n"," DETECTION_MIN_CONFIDENCE = detection_min_confidence\n"," NUM_CLASSES = len(dataset_val.class_names) # Background + nucleus\n"," RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)\n"," POST_NMS_ROIS_INFERENCE = post_nms_rois\n","\n","inference_config = InferenceConfig()\n","\n","# Recreate the model in inference mode\n","model = modellib.MaskRCNN(mode=\"inference\", \n"," config=inference_config,\n"," model_dir=Prediction_model_folder)\n","\n","# Get path to saved weights\n","if Checkpoint < 10:\n"," pred_model_path = Prediction_model_folder+\"/mask_rcnn_\"+os.path.basename(Prediction_model_folder[:-13])+\"_000\"+str(Checkpoint)+\".h5\"\n","elif Checkpoint < 100:\n"," pred_model_path = Prediction_model_folder+\"/mask_rcnn_\"+os.path.basename(Prediction_model_folder[:-13])+\"_00\"+str(Checkpoint)+\".h5\"\n","elif Checkpoint < 1000:\n"," pred_model_path = Prediction_model_folder+\"/mask_rcnn_\"+os.path.basename(Prediction_model_folder[:-13])+\"_0\"+str(Checkpoint)+\".h5\"\n","\n","# Load trained weights\n","print(\"Loading 
weights from \", pred_model_path)\n","model.load_weights(pred_model_path, by_name=True)\n","\n","#@markdown ###Choose how you would like to export the predictions:\n","Export_mode = \"image with class_name,score and mask\" #@param[\"image with bbox, class_name, scores, masks\",\"image with bbox,class_name and score\",\"image with class_name,score and mask\",\"mask with black background\"]\n","if Export_mode == \"image with bbox, class_name, scores, masks\":\n"," export_mode = 0\n","elif Export_mode == \"image with bbox,class_name and score\":\n"," export_mode = 1\n","elif Export_mode == \"image with class_name,score and mask\":\n"," export_mode = 2\n","elif Export_mode == \"mask with black background\":\n"," export_mode = 3\n","\n","\n","file_path = os.path.join(Data_folder, 'Validation')\n","for input in os.listdir(file_path):\n"," if input.endswith('.png'):\n"," image = io.imread(os.path.join(file_path,input))\n"," results = model.detect([image], verbose=0)\n"," r = results[0]\n"," save_image(image, \"predicted_\"+input, r['rois'], r['masks'],\n"," r['class_ids'],r['scores'],class_names,\n"," scores_thresh=0,mode=export_mode,save_dir=Result_folder)\n"]},{"cell_type":"markdown","metadata":{"id":"Yu4OGubv59qa"},"source":["## **6.2. Inspect the predicted output**\n","---\n"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"YnWgQZmlIuv9"},"outputs":[],"source":["#@markdown ##Run this cell to display a randomly chosen input with predicted mask.\n","\n","detection_min_confidence = 0.1 #@param{type:\"number\"}\n","region_proposal_nms_threshold = 0.99 #@param{type:\"number\"}\n","resize_mode = \"none\" #@param[\"none\",\"square\",\"crop\",\"pad64\"]\n","post_nms_rois = 10000 #@param{type:\"integer\"}\n","\n"," # Activate the (pre-)trained model\n","class InferenceConfig(ClassConfig):\n"," IMAGE_RESIZE_MODE = resize_mode\n"," IMAGE_MIN_DIM = 128\n"," IMAGE_MAX_DIM = 128\n"," IMAGE_MIN_SCALE = 2.0\n"," RPN_NMS_THRESHOLD = region_proposal_nms_threshold\n"," #DETECTION_NMS_THRESHOLD = 0.0\n"," NAME = \"nucleus\"\n"," IMAGES_PER_GPU = 1\n"," # Number of classes (including background)\n"," DETECTION_MIN_CONFIDENCE = detection_min_confidence\n"," NUM_CLASSES = len(dataset_val.class_names) # Background + nucleus\n"," RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)\n"," POST_NMS_ROIS_INFERENCE = post_nms_rois\n","\n","inference_config = InferenceConfig()\n","\n","\n","model = modellib.MaskRCNN(mode=\"inference\", \n"," config=inference_config,\n"," model_dir=Prediction_model_folder)\n","\n","model.load_weights(pred_model_path, by_name=True)\n","example_image = random.choice(os.listdir(os.path.join(Data_folder,'Validation')))\n","\n","if example_image.endswith('.csv'):\n"," example_image = example_image[:-4]\n","\n","display_image = io.imread(file_path+'/'+example_image)\n","results = model.detect([display_image], verbose=0)\n","\n","r = results[0]\n","\n","visualize.display_instances(display_image, r['rois'], r['masks'], r['class_ids'], \n"," class_names, r['scores'], ax=get_ax())"]},{"cell_type":"markdown","metadata":{"id":"BrosGM4Z50gX"},"source":["## **6.3. Download your predictions**\n","---\n","\n","**Store your data** and ALL its results elsewhere by downloading it from Google Drive and after that clean the original folder tree (datasets, results, trained model etc.) if you plan to train or use new networks. 
Please note that the notebook will otherwise **OVERWRITE** all files which have the same name."]},{"cell_type":"markdown","metadata":{"id":"JYfEsBazHhkW"},"source":["# **7. Version log**\n","---\n","**v1.13**: \n","\n","\n","* This notebook is new as ZeroCostDL4Mic version 1.13. and is currently a beta version. \n","* Further edits to this notebook in future versions will be updated in this cell."]},{"cell_type":"markdown","metadata":{"id":"F3zreN5K5S2S"},"source":["#**Thank you for using MaskRCNN**!"]}],"metadata":{"accelerator":"GPU","colab":{"collapsed_sections":["YrTo6T74i7s0","RZL8pqcEi0KY","3yywetML0lUX","F3zreN5K5S2S"],"name":"MaskRCNN_ZeroCostDL4Mic.ipynb","provenance":[]},"kernelspec":{"display_name":"Python 3","name":"python3"}},"nbformat":4,"nbformat_minor":0} +{"cells":[{"cell_type":"markdown","metadata":{"id":"YrTo6T74i7s0"},"source":["# **MaskRCNN**\n","\n","---\n","\n"," This notebook is an implementation of MaskRCNN. This neural network performs instance segmentation. This means it can be used to detect objects in images, segment these objects and classify them. This notebook is based on the work of [He et al.](https://arxiv.org/abs/1703.06870)\n","\n","---\n","\n","*Disclaimer*:\n","\n","This notebook is part of the *Zero-Cost Deep-Learning to Enhance Microscopy* project (ZeroCostDL4Mic) (https://github.com/HenriquesLab/DeepLearning_Collab/wiki)\n","\n","This notebook is based on the following paper: \n","\n","**Mask R-CNN**, arxiv, 2018 by Kaiming He, Georgia Gkioxari, Piotr Dollár, Ross Girshick [here](https://arxiv.org/abs/1703.06870)\n","\n","And source code found in: *https://github.com/matterport/Mask_RCNN*\n","\n","Provide information on dataset availability and link for download if applicable.\n","\n","\n","**Please also cite this original paper when using or developing this notebook.**"]},{"cell_type":"markdown","metadata":{"id":"RZL8pqcEi0KY"},"source":["# **How to use this notebook?**\n","\n","---\n","\n","Video describing how to use ZeroCostDL4Mic notebooks are available on youtube:\n"," - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run through of the workflow to obtain the notebooks and the provided test datasets as well as a common use of the notebook\n"," - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook\n","\n","\n","---\n","###**Structure of a notebook**\n","\n","The notebook contains two types of cell: \n","\n","**Text cells** provide information and can be modified by douple-clicking the cell. You are currently reading the text cell. You can create a new text by clicking `+ Text`.\n","\n","**Code cells** contain code and the code can be modfied by selecting the cell. To execute the cell, move your cursor on the `[ ]`-mark on the left side of the cell (play button appears). Click to execute the cell. After execution is done the animation of play button stops. You can create a new coding cell by clicking `+ Code`.\n","\n","---\n","###**Table of contents, Code snippets** and **Files**\n","\n","On the top left side of the notebook you find three tabs which contain from top to bottom:\n","\n","*Table of contents* = contains structure of the notebook. Click the content to move quickly between sections.\n","\n","*Code snippets* = contain examples how to code certain tasks. You can ignore this when using this notebook.\n","\n","*Files* = contain all available files. After mounting your google drive (see section 1.) 
you will find your files and folders here. \n","\n","**Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive-button; your Google Drive is connected in section 1.2.\n","\n","**Note:** The \"sample data\" in \"Files\" contains default files. Do not upload anything in here!\n","\n","---\n","###**Making changes to the notebook**\n","\n","**You can make a copy** of the notebook and save it to your Google Drive. To do this click file -> save a copy in drive.\n","\n","To **edit a cell**, double click on the text. This will show you either the source code (in code cells) or the source text (in text cells).\n","You can use the `#`-mark in code cells to comment out parts of the code. This allows you to keep the original code piece in the cell as a comment."]},{"cell_type":"markdown","metadata":{"id":"3yywetML0lUX"},"source":["#**0. Before getting started**\n","---\n","**We strongly recommend that you generate extra paired images. These images can be used to assess the quality of your trained model (Quality control dataset)**. The quality control assessment can be done directly in this notebook.\n","\n"," **Additionally, the corresponding input and output files need to have the same name**.\n","\n"," Please note that while the file format is flexible (.tif, .png, .jpeg should all work) but these currently **must be of RGB** type.\n","\n","Here's the data structure that you should use:\n","* Experiment A\n"," - **Training dataset**\n"," - Training\n"," - img_1.png, img_1.png.csv, img_2.png, img_2.png.csv, ...\n"," - Validation\n"," - img_a.png, img_a.png.csv, img_b.png, img_b.png.csv,...\n"," - **Quality control dataset**\n"," - Validation\n"," - img_a.png, img_a.png.csv, img_b.png, img_b.png.csv\n"," - **Data to be predicted**\n"," - **Results**\n","\n","---\n","\n"," **Note: This notebook is still in the beta stage.\n","Currently, the notebook works only if the annotation files are in csv format with the following columns:**\n","\n","***| filename | width | height | object_index | class_name | x | y |***\n","\n","where each row in the csv will provide the coordinates **(x,y)** of an edge point in the segmentation mask of an individual object with a dedicated **object_index** (e.g. 1, 2, 3....) and its **class_name** (e.g. 'nucleus' or 'horse' etc.) on the image of dimensions **width** x **height** (pixels). 
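To make the csv convention concrete, a minimal sketch that parses one annotation file back into per-object outlines (the file name is hypothetical, and the header row is assumed to use the column names listed above):

```python
import pandas as pd

# Sketch (hypothetical file name, assuming the header uses the column names listed above):
# regroup the rows of one annotation file by object_index to recover each object's outline.
ann = pd.read_csv('img_1.png.csv')
for object_index, rows in ann.groupby('object_index'):
    outline = list(zip(rows['x'], rows['y']))
    print(object_index, rows['class_name'].iloc[0], len(outline), 'edge points')
```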
If you already have a dataset with segmentation masks we can provide a fiji macro that can convert the dataset into the correct format.\n","*We are actively working on integrating more flexibility into the annotations this notebook can be used with.*\n","\n","---\n","\n","**Important note**\n","\n","- If you wish to **Train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.\n","\n","- If you wish to **Evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.\n","\n","- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.\n","---"]},{"cell_type":"markdown","metadata":{"id":"ffNw8dIQjftT"},"source":["# **1. Install MaskRCNN and dependencies**\n","---\n"]},{"cell_type":"markdown","metadata":{"id":"iYBjQqd95MpG"},"source":["## **1.1. Install key dependencies**\n","---\n"," "]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"UTratIh3-Zl_"},"outputs":[],"source":["#@markdown ##Install MaskRCNN and dependencies\n","!pip install fpdf2\n","!pip install imgaug\n","!pip install h5py==2.10\n","!git clone https://github.com/matterport/Mask_RCNN\n","\n","#Force session restart\n","exit(0)"]},{"cell_type":"markdown","metadata":{"id":"c3JUL5cQ5cY-"},"source":["## **1.2. Restart your runtime**\n","---\n","\n","\n","\n","** Ignore the following message error message. Your Runtime has automatically restarted. This is normal.**\n","\n","\"\"
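After the runtime has restarted, a quick sketch to confirm that the pinned install and the repository clone from section 1.1 are in place (the clone lands in the current working directory):

```python
import os
import h5py

# Quick check (sketch) that the installs from section 1.1 are active after the restart.
print('h5py version:', h5py.__version__)              # pinned to 2.10 in section 1.1
print('Mask_RCNN repository cloned:', os.path.isdir('Mask_RCNN'))
```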
\n"]},{"cell_type":"markdown","metadata":{"id":"eLGtfVWE6lu9"},"source":["## **1.3. Load key dependencies**\n","---\n"," "]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"laDhajuKOs9t"},"outputs":[],"source":["Notebook_version = '1.13'\n","Network = 'MaskRCNN'\n","\n","from builtins import any as b_any\n","\n","def get_requirements_path():\n"," # Store requirements file in 'base_path' directory\n"," current_dir = os.getcwd()\n"," dir_count = current_dir.count('/') - 1\n"," path = '../' * (dir_count) + 'requirements.txt'\n"," return path\n","\n","def filter_files(file_list, filter_list):\n"," filtered_list = []\n"," for fname in file_list:\n"," if b_any(fname.split('==')[0] in s for s in filter_list):\n"," filtered_list.append(fname)\n"," return filtered_list\n","\n","def build_requirements_file(before, after):\n"," path = get_requirements_path()\n","\n"," # Exporting requirements.txt for local run\n"," !pip freeze > $path\n","\n"," # Get minimum requirements file\n"," df = pd.read_csv(path)\n"," mod_list = [m.split('.')[0] for m in after if not m in before]\n"," req_list_temp = df.values.tolist()\n"," req_list = [x[0] for x in req_list_temp]\n","\n"," # Replace with package name and handle cases where import name is different to module name\n"," mod_name_list = [['sklearn', 'scikit-learn'], ['skimage', 'scikit-image']]\n"," mod_replace_list = [[x[1] for x in mod_name_list] if s in [x[0] for x in mod_name_list] else s for s in mod_list]\n"," filtered_list = filter_files(req_list, mod_replace_list)\n","\n"," file=open(path,'w')\n"," for item in filtered_list:\n"," file.writelines(item)\n","\n"," file.close()\n","\n","import sys\n","before = [str(m) for m in sys.modules]\n","\n","#@markdown ##Load Key Dependencies\n","%tensorflow_version 1.x\n","\n","import os\n","import sys\n","import json\n","import datetime\n","import time\n","import numpy as np\n","import skimage.draw\n","from skimage import io\n","import imgaug\n","import pandas as pd\n","import csv\n","import random\n","import datetime\n","import shutil\n","from matplotlib import pyplot as plt\n","import matplotlib.lines as lines\n","from matplotlib.patches import Polygon\n","import IPython.display\n","from PIL import Image, ImageDraw, ImageFont\n","from fpdf import FPDF, HTMLMixin \n","from pip._internal.operations.freeze import freeze\n","import subprocess as sp\n","\n","#Create a variable to get and store relative base path\n","base_path = os.getcwd()\n","\n","# Root directory of the project\n","ROOT_DIR = os.path.abspath(base_path)\n","# !git clone https://github.com/matterport/Mask_RCNN\n","# Import Mask RCNN\n","sys.path.append(ROOT_DIR) # To find local version of the library\n","os.chdir(base_path + '/Mask_RCNN')\n","\n","#Here we need to replace \"self.keras_model.metrics_tensors.append(loss)\" with \"self.keras_model.add_metric(loss, name)\"\n","# in model.py line 2199, otherwise we get version issues.\n","from tempfile import mkstemp\n","from shutil import move, copymode\n","from os import fdopen, remove\n","#This function replaces the old default files with new values\n","def replace(file_path, pattern, subst):\n"," #Create temp file\n"," fh, abs_path = mkstemp()\n"," with fdopen(fh,'w') as new_file:\n"," with open(file_path) as old_file:\n"," for line in old_file:\n"," new_file.write(line.replace(pattern, subst))\n"," #Copy the file permissions from the old file to the new file\n"," copymode(file_path, abs_path)\n"," #Remove original file\n"," remove(file_path)\n"," #Move new file\n"," 
move(abs_path, file_path)\n","\n","replace(base_path + \"/Mask_RCNN/mrcnn/model.py\",'self.keras_model.metrics_tensors.append(loss)','self.keras_model.add_metric(loss, name)')\n","#replace(base_path + \"/Mask_RCNN/mrcnn/model.py\", \"save_weights_only=True),\", \"save_weights_only=True),\\n keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor = 0.1, patience = 30, min_lr = 0, verbose = 1)\")\n","#replace(base_path + \"/Mask_RCNN/mrcnn/model.py\", \"save_weights_only=True),\", \"save_weights_only=True),\\n keras.callbacks.CSVLogger(base_path + '/results.csv'),\")\n","replace(base_path + \"/Mask_RCNN/mrcnn/model.py\",'workers = 0','workers = 1')\n","replace(base_path + \"/Mask_RCNN/mrcnn/model.py\",'workers = multiprocessing.cpu_count()','workers = 1')\n","replace(base_path + \"/Mask_RCNN/mrcnn/model.py\",'use_multiprocessing=True','use_multiprocessing=False')\n","replace(base_path + \"/Mask_RCNN/mrcnn/utils.py\",\"shift = np.array([0, 0, 1, 1])\",\"shift = np.array([0., 0., 1., 1.])\")\n","replace(base_path + \"/Mask_RCNN/mrcnn/visualize.py\", \"i += 1\",\"i += 1\\n plt.savefig(base_path + '/TrainingDataExample_MaskRCNN.png',bbox_inches='tight',pad_inches=0)\")\n","#replace(base_path + \"/Mask_RCNN/mrcnn/model.py\",\" class_ids\",\" if config.NUM_CLASSES == 2:\\n class_ids = tf.ones_like(probs[:, 0], dtype=tf.int32)\\n else:\\n class_ids\")\n","\n","#Using this command will allow display of detections below the 0.5 score threshold, if only 1 class beyond background is in the dataset\n","replace(base_path + \"/Mask_RCNN/mrcnn/model.py\",\"class_ids = tf.argmax(probs\",\"if config.NUM_CLASSES >= 2:\\n class_ids = tf.ones_like(probs[:, 0], dtype=tf.int32)\\n else:\\n class_ids = tf.argmax(probs\")\n","\n","\n","from mrcnn.config import Config\n","from mrcnn import model as modellib, utils\n","from mrcnn import visualize\n","from mrcnn.model import log\n","from mrcnn import utils\n","\n","def get_ax(rows=1, cols=1, size=8):\n"," \"\"\"Return a Matplotlib Axes array to be used in\n"," all visualizations in the notebook. 
Provide a\n"," central point to control graph sizes.\n"," \n"," Change the default size attribute to control the size\n"," of rendered images\n"," \"\"\"\n"," _, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))\n"," return ax\n","\n","############################################################\n","# Dataset\n","############################################################\n","\n","class ClassDataset(utils.Dataset):\n"," def load_coco(annotation_file):\n"," dataset = json.load(open(annotation_file, 'r'))\n"," assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset))\n"," self.dataset = dataset\n"," self.createIndex()\n","\n"," def createIndex(self):\n"," # create index\n"," print('creating index...')\n"," anns, cats, imgs = {}, {}, {}\n"," imgToAnns,catToImgs = defaultdict(list),defaultdict(list)\n"," if 'annotations' in self.dataset:\n"," for ann in self.dataset['annotations']:\n"," imgToAnns[ann['image_id']].append(ann)\n"," anns[ann['id']] = ann\n","\n"," if 'images' in self.dataset:\n"," for img in self.dataset['images']:\n"," imgs[img['id']] = img\n","\n"," if 'categories' in self.dataset:\n"," for cat in self.dataset['categories']:\n"," cats[cat['id']] = cat\n","\n"," if 'annotations' in self.dataset and 'categories' in self.dataset:\n"," for ann in self.dataset['annotations']:\n"," catToImgs[ann['category_id']].append(ann['image_id'])\n","\n"," print('index created!')\n","\n"," # create class members\n"," self.anns = anns\n"," self.imgToAnns = imgToAnns\n"," self.catToImgs = catToImgs\n"," self.imgs = imgs\n"," self.cats = cats\n","\n"," def load_class(self, dataset_dir, subset):\n"," \"\"\"Load a subset of the dataset.\n"," dataset_dir: Root directory of the dataset.\n"," subset: Subset to load: train or val\n"," \"\"\"\n","\n"," # Add classes. We have only one class to add.\n"," self.add_class(\"Training_Datasets\", 1, \"nucleus\")\n"," \n"," # Train or validation dataset?\n"," assert subset in [\"Training\", \"Validation\"]\n"," dataset_dir = os.path.join(dataset_dir, subset)\n","\n"," # Load annotations\n"," # VGG Image Annotator (up to version 1.6) saves each image in the form:\n"," # { 'filename': '28503151_5b5b7ec140_b.jpg',\n"," # 'regions': {\n"," # '0': {\n"," # 'region_attributes': {},\n"," # 'shape_attributes': {\n"," # 'all_points_x': [...],\n"," # 'all_points_y': [...],\n"," # 'name': 'polygon'}},\n"," # ... more regions ...\n"," # },\n"," # 'size': 100202\n"," # }\n"," # We mostly care about the x and y coordinates of each region\n"," # Note: In VIA 2.0, regions was changed from a dict to a list.\n"," annotations = json.load(open(os.path.join(dataset_dir, \"birds071220220_json.json\")))\n"," annotations = list(annotations.values()) # don't need the dict keys\n"," \n"," # The VIA tool saves images in the JSON even if they don't have any\n"," # annotations. Skip unannotated images.\n"," annotations = [a for a in annotations if a['regions']]\n"," \n"," # Add images\n"," for a in annotations:\n"," # Get the x, y coordinaets of points of the polygons that make up\n"," # the outline of each object instance. 
These are stores in the\n"," # shape_attributes (see json format above)\n"," # The if condition is needed to support VIA versions 1.x and 2.x.\n"," if type(a['regions']) is dict:\n"," polygons = [r['shape_attributes'] for r in a['regions'].values()]\n"," else:\n"," polygons = [r['shape_attributes'] for r in a['regions']] \n","\n"," #Get the class of the object\n"," obj_class = [c['region_attributes']['species'] for c in a['regions']]\n","\n"," # load_mask() needs the image size to convert polygons to masks.\n"," # Unfortunately, VIA doesn't include it in JSON, so we must read\n"," # the image. This is only managable since the dataset is tiny.\n"," image_path = os.path.join(dataset_dir, a['filename'])\n"," image = skimage.io.imread(image_path)\n"," height, width = image.shape[:2]\n","\n"," self.add_image(\n"," \"Training_Datasets\",\n"," image_id=a['filename'], # use file name as a unique image id\n"," path=image_path,\n"," width=width, height=height,\n"," polygons=polygons,\n"," obj_class=obj_class)\n"," \n"," def load_image_csv(self, dataset_dir, subset):\n"," # Add classes. We have only one class to add.\n"," # self.add_class(\"Training_Datasets\", 1, \"nucleus\")\n"," #self.add_class(\"Training_Datasets\", 2, \"Great tit\")\n"," \n"," # Train or validation dataset?\n"," assert subset in [\"Training\", \"Validation\"]\n"," dataset_dir = os.path.join(dataset_dir, subset)\n"," #Data Format\n"," #csv file:\n"," #filename,width,height,object_index, class_name, x, y\n"," #file_1,256,256,1,nucleus, 1, 1\n"," #file_1,256,256,1,nucleus, 3, 10\n"," #file_1,256,256,1,nucleus, 1, 3\n"," #file_1,256,256,1,nucleus, 3, 7\n"," #file_1,256,256,2,nucleus, 17, 20\n"," #...\n"," class_index = 0\n"," obj_class_old = \"\"\n"," #class_names will hold all the classes we find in the dataset \n"," class_names = {obj_class_old:class_index}\n"," for csv_file_name in os.listdir(dataset_dir):\n"," if csv_file_name.endswith('.csv'):\n"," with open(os.path.join(dataset_dir,csv_file_name)) as csvfile_count:\n"," row_count = sum(1 for _ in csvfile_count)\n"," with open(os.path.join(dataset_dir,csv_file_name)) as csvfile:\n"," annotations = csv.reader(csvfile)\n"," next(annotations)\n"," polygons = []\n"," x_values = []\n"," y_values = []\n"," index_old = 1\n"," for line in annotations:\n"," img_file_name = line[0]\n"," index_new = int(line[4])\n"," obj_class = line[3]\n"," \n"," if not obj_class in class_names:\n"," class_index+=1\n"," class_names[obj_class] = class_index\n"," self.add_class(\"Training_Datasets\", class_index, obj_class)\n"," \n"," if index_new == index_old:\n"," x_values.append(int(line[5]))\n"," y_values.append(int(line[6]))\n"," \n"," if row_count == annotations.line_num:\n"," polygon = {\"class_name\":class_names[obj_class],\"all_points_x\":x_values,\"all_points_y\":y_values}\n"," polygons.append(polygon)\n"," \n"," elif index_new != index_old:\n"," polygon = {\"class_name\":class_names[obj_class_old],\"all_points_x\":x_values,\"all_points_y\":y_values}\n"," polygons.append(polygon)\n"," x_values = []\n"," x_values.append(int(line[5]))\n"," y_values = []\n"," y_values.append(int(line[6]))\n"," \n"," index_old = int(line[4])\n"," obj_class_old = line[3]\n"," image_path = os.path.join(dataset_dir,img_file_name)\n"," \n"," self.add_image(\n"," \"Training_Datasets\",\n"," image_id=img_file_name, # use file name as a unique image id\n"," path=image_path,\n"," width=int(line[1]), height=int(line[2]),\n"," polygons=polygons)\n"," #print(csv_file_name, class_index, polygons)\n"," return 
class_index\n","\n"," def load_mask(self, image_id):\n"," info = self.image_info[image_id]\n"," #print(info)\n"," mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n"," dtype=np.uint8)\n"," class_ids = []\n"," #class_index = 0\n"," for i, p in enumerate(info[\"polygons\"]):\n"," \n"," class_name = p['class_name']\n"," # class_names = {class_name:class_index}\n"," # if class_name != class_name_old:\n"," # class_index+=1\n"," # class_names[class_name] = class_index\n"," \n"," # Get indexes of pixels inside the polygon and set them to 1\n"," # print(p['y_values'])\n"," rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n"," mask[rr, cc, i] = 1\n"," \n"," #class_name_old = p['class_name']\n"," class_ids.append(class_name)\n"," \n"," class_ids = np.array(class_ids)\n","\n"," return mask.astype(np.bool), class_ids.astype(np.int32)\n","\n"," # def load_mask(self, image_id):\n"," # \"\"\"Generate instance masks for an image.\n"," # Returns:\n"," # masks: A bool array of shape [height, width, instance count] with\n"," # one mask per instance.\n"," # class_ids: a 1D array of class IDs of the instance masks.\n"," # \"\"\"\n"," # def clean_name(name):\n"," # \"\"\"Returns a shorter version of object names for cleaner display.\"\"\"\n"," # return \",\".join(name.split(\",\")[:1])\n","\n"," # # If not a balloon dataset image, delegate to parent class.\n"," # image_info = self.image_info[image_id]\n"," # if image_info[\"source\"] != \"Training_Datasets\":\n"," # return super(self.__class__, self).load_mask(image_id)\n","\n"," # # Convert polygons to a bitmap mask of shape\n"," # # [height, width, instance_count]\n"," # info = self.image_info[image_id]\n","\n"," # mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n"," # dtype=np.uint8)\n"," # for i, p in enumerate(info[\"polygons\"]):\n"," # # Get indexes of pixels inside the polygon and set them to 1\n"," # rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n"," # mask[rr, cc, i] = 1\n","\n"," # classes = info[\"obj_class\"]\n"," # class_list = [clean_name(c[\"name\"]) for c in self.class_info]\n"," # class_ids = np.array([class_list.index(s) for s in classes])\n","\n"," # # Return mask, and array of class IDs of each instance. Since we have\n"," # # one class ID only, we return an array of 1s\n"," # return mask.astype(np.bool), class_ids.astype(np.int32)#np.ones([mask.shape[-1]], dtype=np.int32)\n","\n"," def image_reference(self, image_id):\n"," \"\"\"Return the path of the image.\"\"\"\n"," info = self.image_info[image_id]\n"," if info[\"source\"] == \"Training_Datasets\":\n"," return info[\"path\"]\n"," else:\n"," super(self.__class__, self).image_reference(image_id)\n","\n","\n","def train(model, augmentation=True):\n"," \"\"\"Train the model.\"\"\"\n"," # Training dataset.\n"," dataset_train = ClassDataset()\n"," dataset_train.load_class(base_path + '/gdrive/MyDrive/MaskRCNN/Training_Datasets', \"Training\")\n"," dataset_train.prepare()\n","\n"," # Validation dataset\n"," dataset_val = ClassDataset()\n"," dataset_val.load_class(base_path + '/gdrive/MyDrive/MaskRCNN/Training_Datasets', \"Validation\")\n"," dataset_val.prepare()\n","\n"," if augmentation == True:\n"," augment = imgaug.augmenters.Sometimes(0.5, imgaug.augmenters.OneOf([imgaug.augmenters.Fliplr(0.5),\n"," imgaug.augmenters.Flipud(0.5),\n"," imgaug.augmenters.Affine(rotate=45)]))\n"," else:\n"," augment = None\n"," # *** This training schedule is an example. 
Update to your needs ***\n"," # Since we're using a very small dataset, and starting from\n"," # COCO trained weights, we don't need to train too long. Also,\n"," # no need to train all layers, just the heads should do it.\n"," print(\"Training network heads\")\n"," model.train(dataset_train, dataset_val,\n"," learning_rate=config.LEARNING_RATE,\n"," epochs=80,\n"," augmentation = augment,\n"," layers='heads')\n","\n","\n","def train_csv(model, training_folder, augmentation=True, epochs = 20, layers = 'heads'):\n"," \"\"\"Train the model.\"\"\"\n"," # Training dataset.\n"," dataset_train = ClassDataset()\n"," dataset_train.load_image_csv(training_folder, \"Training\")\n"," dataset_train.prepare()\n","\n"," # Validation dataset\n"," dataset_val = ClassDataset()\n"," dataset_val.load_image_csv(training_folder, \"Validation\")\n"," dataset_val.prepare()\n","\n"," if augmentation == True:\n"," augment = imgaug.augmenters.SomeOf((1,2),[imgaug.augmenters.OneOf([imgaug.augmenters.Affine(rotate=90),\n"," imgaug.augmenters.Affine(rotate=180),\n"," imgaug.augmenters.Affine(rotate=270)]),\n"," imgaug.augmenters.Fliplr(0.5),\n"," imgaug.augmenters.Flipud(0.5),\n"," imgaug.augmenters.Multiply((0.8, 1.5)),\n"," imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0))])\n"," else:\n"," augment = None\n"," # *** This training schedule is an example. Update to your needs ***\n"," # Since we're using a very small dataset, and starting from\n"," # COCO trained weights, we don't need to train too long. Also,\n"," # no need to train all layers, just the heads should do it.\n"," print(\"Training network heads\")\n"," model.train(dataset_train, dataset_val,\n"," learning_rate=config.LEARNING_RATE,\n"," epochs=epochs,\n"," augmentation = augment,\n"," layers=layers)\n","\n","def color_splash(image, mask):\n"," \"\"\"Apply color splash effect.\n"," image: RGB image [height, width, 3]\n"," mask: instance segmentation mask [height, width, instance count]\n"," Returns result image.\n"," \"\"\"\n"," # Make a grayscale copy of the image. 
The grayscale copy still\n"," # has 3 RGB channels, though.\n"," gray = skimage.color.gray2rgb(skimage.color.rgb2gray(image)) * 255\n"," # Copy color pixels from the original color image where mask is set\n"," if mask.shape[-1] > 0:\n"," # We're treating all instances as one, so collapse the mask into one layer\n"," mask = (np.sum(mask, -1, keepdims=True) >= 1)\n"," splash = np.where(mask, image, gray).astype(np.uint8)\n"," else:\n"," splash = gray.astype(np.uint8)\n"," return splash\n","\n","\n","def detect_and_color_splash(model, image_path=None, video_path=None):\n"," assert image_path or video_path\n","\n"," # Image or video?\n"," if image_path:\n"," # Run model detection and generate the color splash effect\n"," print(\"Running on {}\".format(args.image))\n"," # Read image\n"," image = skimage.io.imread(args.image)\n"," # Detect objects\n"," r = model.detect([image], verbose=1)[0]\n"," # Color splash\n"," splash = color_splash(image, r['masks'])\n"," # Save output\n"," file_name = \"splash_{:%Y%m%dT%H%M%S}.png\".format(datetime.datetime.now())\n"," skimage.io.imsave(file_name, splash)\n"," elif video_path:\n"," import cv2\n"," # Video capture\n"," vcapture = cv2.VideoCapture(video_path)\n"," width = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH))\n"," height = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT))\n"," fps = vcapture.get(cv2.CAP_PROP_FPS)\n","\n"," # Define codec and create video writer\n"," file_name = \"splash_{:%Y%m%dT%H%M%S}.avi\".format(datetime.datetime.now())\n"," vwriter = cv2.VideoWriter(file_name,\n"," cv2.VideoWriter_fourcc(*'MJPG'),\n"," fps, (width, height))\n","\n"," count = 0\n"," success = True\n"," while success:\n"," print(\"frame: \", count)\n"," # Read next image\n"," success, image = vcapture.read()\n"," if success:\n"," # OpenCV returns images as BGR, convert to RGB\n"," image = image[..., ::-1]\n"," # Detect objects\n"," r = model.detect([image], verbose=0)[0]\n"," # Color splash\n"," splash = color_splash(image, r['masks'])\n"," # RGB -> BGR to save image to video\n"," splash = splash[..., ::-1]\n"," # Add image to video writer\n"," vwriter.write(splash)\n"," count += 1\n"," vwriter.release()\n"," print(\"Saved to \", file_name)\n","\n","# Colors for the warning messages\n","class bcolors:\n"," WARNING = '\\033[31m'\n"," NORMAL = '\\033[0m'\n","\n","class ClassConfig(Config):\n"," \"\"\"Configuration for training on the toy dataset.\n"," Derives from the base Config class and overrides some values.\n"," \"\"\"\n"," # Give the configuration a recognizable name\n"," # We use a GPU with 12GB memory, which can fit two images.\n"," # Adjust down if you use a smaller GPU.\n"," IMAGES_PER_GPU = 1\n"," DETECTION_MIN_CONFIDENCE = 0\n"," NAME = \"nucleus\"\n"," # Backbone network architecture\n"," # Supported values are: resnet50, resnet101\n"," BACKBONE = \"resnet50\"\n"," # Input image resizing\n"," # Random crops of size 64x64\n"," IMAGE_RESIZE_MODE = \"crop\"\n"," IMAGE_MIN_DIM = 256\n"," IMAGE_MAX_DIM = 256\n"," IMAGE_MIN_SCALE = 2.0\n"," # Length of square anchor side in pixels\n"," RPN_ANCHOR_SCALES = (4, 8, 16, 32, 64)\n"," # ROIs kept after non-maximum supression (training and inference)\n"," POST_NMS_ROIS_TRAINING = 200\n"," POST_NMS_ROIS_INFERENCE = 400\n"," # Non-max suppression threshold to filter RPN proposals.\n"," # You can increase this during training to generate more propsals.\n"," RPN_NMS_THRESHOLD = 0.9\n"," # How many anchors per image to use for RPN training\n"," RPN_TRAIN_ANCHORS_PER_IMAGE = 64\n"," # Image mean (RGB)\n"," MEAN_PIXEL = 
np.array([43.53, 39.56, 48.22])\n"," # If enabled, resizes instance masks to a smaller size to reduce\n"," # memory load. Recommended when using high-resolution images.\n"," USE_MINI_MASK = True\n"," MINI_MASK_SHAPE = (56, 56) # (height, width) of the mini-mask\n"," TRAIN_ROIS_PER_IMAGE = 128\n"," # Maximum number of ground truth instances to use in one image\n"," MAX_GT_INSTANCES = 100\n"," # Max number of final detections per image\n"," DETECTION_MAX_INSTANCES = 200\n","\n","# Below we define a function which saves the predictions.\n","# It is from this branch:\n","# https://github.com/matterport/Mask_RCNN/commit/bc8f148b820ebd45246ed358a120c99b09798d71\n","\n","def save_image(image, image_name, boxes, masks, class_ids, scores, class_names, filter_classs_names=None,\n"," scores_thresh=0.1, save_dir=None, mode=0):\n"," \"\"\"\n"," image: image array\n"," image_name: image name\n"," boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.\n"," masks: [num_instances, height, width]\n"," class_ids: [num_instances]\n"," scores: confidence scores for each box\n"," class_names: list of class names of the dataset\n"," filter_classs_names: (optional) list of class names we want to draw\n"," scores_thresh: (optional) threshold of confidence scores\n"," save_dir: (optional) the path to store image\n"," mode: (optional) select the result which you want\n"," mode = 0 , save image with bbox,class_name,score and mask;\n"," mode = 1 , save image with bbox,class_name and score;\n"," mode = 2 , save image with class_name,score and mask;\n"," mode = 3 , save mask with black background;\n"," \"\"\"\n"," mode_list = [0, 1, 2, 3]\n"," assert mode in mode_list, \"mode's value should in mode_list %s\" % str(mode_list)\n","\n"," if save_dir is None:\n"," save_dir = os.path.join(os.getcwd(), \"output\")\n"," if not os.path.exists(save_dir):\n"," os.makedirs(save_dir)\n","\n"," useful_mask_indices = []\n","\n"," N = boxes.shape[0]\n"," if not N:\n"," print(\"\\n*** No instances in image %s to draw *** \\n\" % (image_name))\n"," return\n"," else:\n"," assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]\n","\n"," for i in range(N):\n"," # filter\n"," class_id = class_ids[i]\n"," score = scores[i] if scores is not None else None\n"," if score is None or score < scores_thresh:\n"," continue\n","\n"," label = class_names[class_id]\n"," if (filter_classs_names is not None) and (label not in filter_classs_names):\n"," continue\n","\n"," if not np.any(boxes[i]):\n"," # Skip this instance. Has no bbox. 
Likely lost in image cropping.\n"," continue\n","\n"," useful_mask_indices.append(i)\n","\n"," if len(useful_mask_indices) == 0:\n"," print(\"\\n*** No instances in image %s to draw *** \\n\" % (image_name))\n"," return\n","\n"," colors = visualize.random_colors(len(useful_mask_indices))\n","\n"," if mode != 3:\n"," masked_image = image.astype(np.uint8).copy()\n"," else:\n"," masked_image = np.zeros(image.shape).astype(np.uint8)\n","\n"," if mode != 1:\n"," for index, value in enumerate(useful_mask_indices):\n"," masked_image = visualize.apply_mask(masked_image, masks[:, :, value], colors[index])\n","\n"," masked_image = Image.fromarray(masked_image)\n","\n"," if mode == 3:\n"," masked_image.save(os.path.join(save_dir, '%s' % (image_name)))\n"," return\n","\n"," draw = ImageDraw.Draw(masked_image)\n"," colors = np.array(colors).astype(int) * 255\n","\n"," for index, value in enumerate(useful_mask_indices):\n"," class_id = class_ids[value]\n"," score = scores[value]\n"," label = class_names[class_id]\n","\n"," y1, x1, y2, x2 = boxes[value]\n"," if mode != 2:\n"," color = tuple(colors[index])\n"," draw.rectangle((x1, y1, x2, y2), outline=color)\n","\n"," # Label\n"," font = ImageFont.load_default()\n"," draw.text((x1, y1), \"%s %f\" % (label, score), (255, 255, 255), font)\n","\n"," masked_image.save(os.path.join(save_dir, '%s' % (image_name)))\n","\n","def pdf_export(config, trained = False, augmentation = False, pretrained_model = False):\n"," class MyFPDF(FPDF, HTMLMixin):\n"," pass\n","\n"," config_list = \"\"\n"," for a in dir(config):\n"," if not a.startswith(\"__\") and not callable(getattr(config, a)):\n"," config_list += \"{}: {}\\n\".format(a, getattr(config, a))\n"," \n"," pdf = MyFPDF()\n"," pdf.add_page()\n"," pdf.set_right_margin(-1)\n"," pdf.set_font(\"Arial\", size = 11, style='B') \n","\n"," Network = 'MaskRCNN'\n"," day = datetime.datetime.now()\n"," datetime_str = str(day)[0:10]\n","\n"," Header = 'Training report for '+Network+' model ('+model_name+'):\\nDate: '+datetime_str\n"," pdf.multi_cell(180, 5, txt = Header, align = 'L') \n","\n"," # add another cell\n"," if trained:\n"," training_time = \"Training time: \"+str(hour)+ \"hour(s) \"+str(mins)+\"min(s) \"+str(round(sec))+\"sec(s)\"\n"," pdf.cell(190, 5, txt = training_time, ln = 1, align='L')\n"," pdf.ln(1)\n","\n"," Header_2 = 'Information for your materials and methods:'\n"," pdf.cell(190, 5, txt=Header_2, ln=1, align='L')\n","\n"," all_packages = ''\n"," for requirement in freeze(local_only=True):\n"," all_packages = all_packages+requirement+', '\n"," #print(all_packages)\n","\n"," #Main Packages\n"," main_packages = ''\n"," version_numbers = []\n"," for name in ['tensorflow','numpy','Keras']:\n"," find_name=all_packages.find(name)\n"," main_packages = main_packages+all_packages[find_name:all_packages.find(',',find_name)]+', '\n"," #Version numbers only here:\n"," version_numbers.append(all_packages[find_name+len(name)+2:all_packages.find(',',find_name)])\n","\n"," try:\n"," cuda_version = subprocess.run([\"nvcc\",\"--version\"],stdout=subprocess.PIPE)\n"," cuda_version = cuda_version.stdout.decode('utf-8')\n"," cuda_version = cuda_version[cuda_version.find(', V')+3:-1]\n"," except:\n"," cuda_version = ' - No cuda found - '\n"," try:\n"," gpu_name = subprocess.run([\"nvidia-smi\"],stdout=subprocess.PIPE)\n"," gpu_name = gpu_name.stdout.decode('utf-8')\n"," gpu_name = gpu_name[gpu_name.find('Tesla'):gpu_name.find('Tesla')+10]\n"," except:\n"," gpu_name = ' - No GPU found - '\n"," 
#print(cuda_version[cuda_version.find(', V')+3:-1])\n"," #print(gpu_name)\n"," try:\n"," shape = io.imread(Training_source+'/Training/'+os.listdir(Training_source+'/Training')[0]).shape\n"," except:\n"," shape = io.imread(Training_source+'/Training/'+os.listdir(Training_source+'/Training')[0][:-4]).shape\n"," dataset_size = len(os.listdir(Training_source))/2\n","\n"," text = 'The '+Network+' model was trained using weights initialised on the coco dataset for '+str(number_of_epochs)+' epochs on '+str(dataset_size)+' labelled images (image dimensions: '+str(shape)+') with a batch size of '+str(config.BATCH_SIZE)+' and custom loss functions for region proposal and classification, using the '+Network+' ZeroCostDL4Mic notebook (v '+Notebook_version[0]+') (von Chamier & Laine et al., 2020). Key python packages used include tensorflow (v '+version_numbers[0]+'), Keras (v '+version_numbers[2]+'), numpy (v '+version_numbers[1]+'), cuda (v '+cuda_version+'). The training was accelerated using a '+gpu_name+'GPU.'\n","\n"," if pretrained_model:\n"," text = 'The '+Network+' model was trained for '+str(number_of_epochs)+' epochs on '+str(dataset_size)+' labelled images (image dimensions: '+str(shape)+') with a batch size of '+str(config.BATCH_SIZE)+' and custom loss functions for region proposal and classification, using the '+Network+' ZeroCostDL4Mic notebook (v '+Notebook_version[0]+') (von Chamier & Laine et al., 2020). The model was retrained from a previous model checkpoint (model: '+os.path.basename(pretrained_model_path)[:-8]+', checkpoint: '+str(int(pretrained_model_path[-7:-3]))+'). Key python packages used include tensorflow (v '+version_numbers[0]+'), Keras (v '+version_numbers[2]+'), numpy (v '+version_numbers[1]+'), cuda (v '+cuda_version+'). The training was accelerated using a '+gpu_name+'GPU.'\n","\n"," pdf.set_font('')\n"," pdf.set_font_size(10.)\n"," pdf.multi_cell(190, 5, txt = text, align='L')\n"," pdf.ln(1)\n"," pdf.set_font('')\n"," pdf.set_font('Arial', size = 10, style = 'B')\n"," pdf.ln(1)\n"," pdf.cell(28, 5, txt='Augmentation: ', ln=0)\n"," pdf.set_font('')\n"," if augmentation:\n"," aug_text = 'The dataset was augmented by vertical and horizontal flipping'\n"," # if multiply_dataset_by >= 2:\n"," # aug_text = aug_text+'\\n- flipping'\n"," # if multiply_dataset_by > 2:\n"," # aug_text = aug_text+'\\n- rotation'\n"," else:\n"," aug_text = 'No augmentation was used for training.'\n"," pdf.multi_cell(190, 5, txt=aug_text, align='L')\n"," pdf.ln(1)\n"," pdf.set_font('Arial', size = 11, style = 'B')\n"," pdf.ln(1)\n"," pdf.cell(180, 5, txt = 'Parameters', align='L', ln=1)\n"," pdf.set_font('')\n"," pdf.set_font_size(10.)\n"," # if Use_Default_Advanced_Parameters:\n"," # pdf.cell(200, 5, txt='Default Advanced Parameters were enabled')\n"," pdf.cell(200, 5, txt='The following parameters were used for training:')\n"," pdf.ln(4)\n"," pdf.multi_cell(200, 5, txt=config_list)\n"," pdf.ln(1)\n","\n"," pdf.set_font(\"Arial\", size = 11, style='B')\n"," pdf.ln(1)\n"," pdf.cell(190, 5, txt = 'Training Dataset', align='L', ln=1)\n"," pdf.set_font('')\n"," pdf.set_font('Arial', size = 10, style = 'B')\n"," pdf.cell(30, 5, txt= 'Training_source:', align = 'L', ln=0)\n"," pdf.set_font('')\n"," pdf.multi_cell(170, 5, txt = Training_source+'/Training', align = 'L')\n"," pdf.ln(1)\n"," pdf.set_font('')\n"," pdf.set_font('Arial', size = 10, style = 'B')\n"," pdf.cell(29, 5, txt= 'Validation:', align = 'L', ln=0)\n"," pdf.set_font('')\n"," pdf.multi_cell(170, 5, txt = Training_source+'/Validation', 
align = 'L')\n"," #pdf.cell(190, 5, txt=aug_text, align='L', ln=1)\n"," pdf.ln(1)\n"," pdf.set_font('')\n"," pdf.set_font('Arial', size = 10, style = 'B')\n"," pdf.cell(22, 5, txt= 'Model Path:', align = 'L', ln=0)\n"," pdf.set_font('')\n"," pdf.multi_cell(170, 5, txt = model_path+'/'+model_name, align = 'L')\n"," pdf.ln(1)\n"," pdf.cell(60, 5, txt = 'Example ground-truth annotation', ln=1)\n"," pdf.ln(1)\n"," exp_size = io.imread(base_path + '/TrainingDataExample_MaskRCNN.png').shape\n"," pdf.image(base_path + '/TrainingDataExample_MaskRCNN.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))\n"," pdf.ln(1)\n"," ref_1 = 'References:\\n - ZeroCostDL4Mic: von Chamier, Lucas & Laine, Romain, et al. \"Democratising deep learning for microscopy with ZeroCostDL4Mic.\" Nature Communications (2021).'\n"," pdf.multi_cell(190, 5, txt = ref_1, align='L')\n"," pdf.ln(1)\n"," ref_2 = '- MaskRCNN: Kaiming He, Georgia Gkioxari, Piotr Dollár, Ross Girshick. \"Mask R - CNN\" arxiv. 2018.'\n"," pdf.multi_cell(190, 5, txt = ref_2, align='L')\n"," pdf.ln(1)\n"," if augmentation:\n"," ref_3 = '- imgaug: Jung, Alexander et al., https://github.com/aleju/imgaug, (2020)'\n"," pdf.multi_cell(190, 5, txt = ref_3, align='L')\n"," pdf.ln(1)\n"," pdf.ln(3)\n"," reminder = 'Important:\\nRemember to perform the quality control step on all newly trained models\\nPlease consider depositing your training dataset on Zenodo'\n"," pdf.set_font('Arial', size = 11, style='B')\n"," pdf.multi_cell(190, 5, txt=reminder, align='C')\n"," pdf.ln(1)\n","\n"," pdf.output(os.path.dirname(model.log_dir)+'/'+model_name+'_training_report.pdf')\n","\n"," print('------------------------------')\n"," print('PDF report exported in '+model_path+'/'+model_name+'/')\n","\n","def qc_pdf_export():\n"," class MyFPDF(FPDF, HTMLMixin):\n"," pass\n","\n"," pdf = MyFPDF()\n"," pdf.add_page()\n"," pdf.set_right_margin(-1)\n"," pdf.set_font(\"Arial\", size = 11, style='B') \n","\n"," Network = 'MaskRCNN'\n","\n"," day = datetime.datetime.now()\n"," datetime_str = str(day)[0:16]\n","\n"," Header = 'Quality Control report for '+Network+' model ('+QC_model_name+', checkpoint:'+str(Checkpoint)+')\\nDate and Time: '+datetime_str\n"," pdf.multi_cell(180, 5, txt = Header, align = 'L') \n"," pdf.ln(1)\n","\n"," all_packages = ''\n"," for requirement in freeze(local_only=True):\n"," all_packages = all_packages+requirement+', '\n","\n"," pdf.set_font('')\n"," pdf.set_font('Arial', size = 11, style = 'B')\n"," pdf.ln(2)\n"," pdf.cell(190, 5, txt = 'Development of Training Losses', ln=1, align='L')\n"," pdf.ln(1)\n"," if os.path.exists(QC_model_folder+'/Quality Control/lossCurveAndmAPPlots.png'):\n"," exp_size = io.imread(QC_model_folder+'/Quality Control/lossCurveAndmAPPlots.png').shape\n"," pdf.image(QC_model_folder+'/Quality Control/lossCurveAndmAPPlots.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))\n"," else:\n"," pdf.set_font('')\n"," pdf.set_font('Arial', size=10)\n"," pdf.multi_cell(190, 5, txt='If you would like to see the evolution of the loss function during training please play the first cell of the QC section in the notebook.')\n"," pdf.ln(1)\n"," pdf.set_font('')\n"," pdf.set_font('Arial', size = 10, style = 'B')\n"," pdf.cell(80, 5, txt = 'P-R curves for test dataset', ln=1, align='L')\n"," pdf.ln(2)\n"," #for i in range(len(AP)):\n"," # os.path.exists(QC_model_folder+'/Quality Control/P-R_curve_'+config['model']['labels'][i]+'.png'):\n"," exp_size = io.imread(QC_model_folder+'/Quality 
Control/P-R_curve_'+QC_model_name+'.png').shape\n"," pdf.ln(1)\n"," pdf.image(QC_model_folder+'/Quality Control/P-R_curve_'+QC_model_name+'.png', x=16, y=None, w=round(exp_size[1]/4), h=round(exp_size[0]/4))\n"," # else:\n"," # pdf.cell(100, 5, txt='For the class '+config['model']['labels'][i]+' the model did not predict any objects.', ln=1, align='L')\n"," pdf.ln(3)\n"," pdf.set_font('')\n"," pdf.set_font('Arial', size = 11, style = 'B')\n"," pdf.ln(1)\n"," pdf.cell(180, 5, txt = 'Quality Control Metrics', align='L', ln=1)\n"," pdf.set_font('')\n"," pdf.set_font_size(10.)\n","\n"," pdf.ln(1)\n"," html = \"\"\"\n"," \n"," \n"," \"\"\"\n"," with open(QC_model_folder+'/Quality Control/QC_results.csv', 'r') as csvfile:\n"," metrics = csv.reader(csvfile)\n"," header = next(metrics)\n"," class_name = header[0]\n"," gt = header[1]\n"," tp = header[2]\n"," fn = header[3]\n"," iou = header[4]\n"," mAP = header[5]\n"," header = \"\"\"\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \"\"\".format(class_name,gt,tp,fn,iou,mAP)\n"," html = html+header\n"," i=0\n"," for row in metrics:\n"," i+=1\n"," class_name = row[0]\n"," gt = row[1]\n"," tp = row[2]\n"," fn = row[3]\n"," iou = row[4]\n"," mAP = row[5]\n"," cells = \"\"\"\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \"\"\".format(class_name,str(gt),str(tp),str(fn),str(iou),str(mAP))\n"," html = html+cells\n"," html = html+\"\"\"
</table>
</body>
\"\"\"\n","\n"," pdf.write_html(html)\n"," pdf.set_font('')\n"," pdf.set_font('Arial', size = 11, style = 'B')\n"," pdf.ln(3)\n"," pdf.cell(80, 5, txt = 'Example Quality Control Visualisation', ln=1)\n"," pdf.ln(3)\n"," exp_size = io.imread(QC_model_folder+'/Quality Control/QC_example_data.png').shape\n"," pdf.image(QC_model_folder+'/Quality Control/QC_example_data.png', x = 16, y = None, w = round(exp_size[1]/10), h = round(exp_size[0]/10))\n","\n"," pdf.set_font('')\n"," pdf.set_font_size(10.)\n"," pdf.ln(3)\n"," ref_1 = 'References:\\n - ZeroCostDL4Mic: von Chamier, Lucas & Laine, Romain, et al. \"Democratising deep learning for microscopy with ZeroCostDL4Mic.\" Nature Communications (2021).'\n"," pdf.multi_cell(190, 5, txt = ref_1, align='L')\n"," pdf.ln(1)\n"," ref_2 = '- MaskRCNN: Kaiming He, Georgia Gkioxari, Piotr Dollár, Ross Girshick. \"Mask R - CNN\" arxiv. 2018.'\n"," pdf.multi_cell(190, 5, txt = ref_2, align='L')\n"," pdf.ln(1)\n","\n"," pdf.ln(3)\n"," reminder = 'To find the parameters and other information about how this model was trained, go to the training_report.pdf of this model which should be in the folder of the same name.'\n","\n"," pdf.set_font('Arial', size = 11, style='B')\n"," pdf.multi_cell(190, 5, txt=reminder, align='C')\n"," pdf.ln(1)\n","\n"," pdf.output(QC_model_folder+'/Quality Control/'+QC_model_name+'_QC_report.pdf')\n","\n","\n"," print('------------------------------')\n"," print('PDF report exported in '+QC_model_folder+'/Quality Control/')\n","\n","\n","# Check if this is the latest version of the notebook\n","All_notebook_versions = pd.read_csv(\"https://raw.githubusercontent.com/HenriquesLab/ZeroCostDL4Mic/master/Colab_notebooks/Latest_Notebook_versions.csv\", dtype=str)\n","print('Notebook version: '+Notebook_version)\n","Latest_Notebook_version = All_notebook_versions[All_notebook_versions[\"Notebook\"] == Network]['Version'].iloc[0]\n","print('Latest notebook version: '+Latest_Notebook_version)\n","if Notebook_version == Latest_Notebook_version:\n"," print(\"This notebook is up-to-date.\")\n","else:\n"," print(bcolors.WARNING +\"A new version of this notebook has been released. We recommend that you download it at https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki\")\n","\n","\n","# Build requirements file for local run\n","after = [str(m) for m in sys.modules]\n","build_requirements_file(before, after)"]},{"cell_type":"markdown","metadata":{"id":"s7_nokQv7M4-"},"source":["# **2. Initialise the Colab session**\n","\n","\n","\n","\n","---\n","\n","\n","\n","\n"]},{"cell_type":"markdown","metadata":{"id":"5-hsYVdkjKuI"},"source":["\n","## **2.1. Check for GPU access**\n","---\n","\n","By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:\n","\n","Go to **Runtime -> Change the Runtime type**\n","\n","**Runtime type: Python 3** *(Python 3 is programming language in which this program is written)*\n","\n","**Accelator: GPU** *(Graphics processing unit)*\n"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"-goWypUVEvnp"},"outputs":[],"source":["#@markdown ##Run this cell to check if you have GPU access\n","%tensorflow_version 1.x\n","import tensorflow as tf\n","if tf.test.gpu_device_name()=='':\n"," print('You do not have GPU access.') \n"," print('Did you change your runtime ?') \n"," print('If the runtime setting is correct then Google did not allocate a GPU for your session')\n"," print('Expect slow performance. 
To access GPU try reconnecting later')\n","\n","else:\n"," print('You have GPU access')\n"," !nvidia-smi"]},{"cell_type":"markdown","metadata":{"id":"L_pjmwONjTvb"},"source":["## **2.2. Mount your Google Drive**\n","---\n"," To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook.\n","\n"," Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste into the cell and press enter. This will give Colab access to the data on the drive. \n","\n"," Once this is done, your data are available in the **Files** tab on the top left of notebook."]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"QK-DDu1ljVna"},"outputs":[],"source":["#@markdown ##Run this cell to connect your Google Drive to Colab\n","\n","#@markdown * Click on the URL. \n","\n","#@markdown * Sign in your Google Account. \n","\n","#@markdown * Copy the authorization code. \n","\n","#@markdown * Enter the authorization code. \n","\n","#@markdown * Click on \"Files\" site on the right. Refresh the site. Your Google Drive folder should now be available here as \"drive\". \n","\n","#mounts user's Google Drive to Google Colab.\n","\n","from google.colab import drive\n","drive.mount(base_path + '/gdrive')"]},{"cell_type":"markdown","metadata":{"id":"P-YFjdLR-5hv"},"source":["** If you cannot see your files, reactivate your session by connecting to your hosted runtime.** \n","\n","\n","\"Example
Connect to a hosted runtime.
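As a quick check before setting up the training paths, the mount point created above can be tested from Python. This is only a minimal sketch, assuming the usual Colab layout with a `MyDrive` folder inside the mount point chosen in the cell above:

```python
import os

# Minimal sanity check that the Drive mount created above is usable.
# Assumes the default Colab layout: <mount point>/MyDrive/...
mount_point = os.path.join(base_path, "gdrive")
my_drive = os.path.join(mount_point, "MyDrive")

if os.path.isdir(my_drive):
    print("Google Drive is mounted at:", mount_point)
else:
    print("Drive folder not found - re-run the mount cell above.")
```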
"]},{"cell_type":"markdown","metadata":{"id":"Do_LZbDmpJiZ"},"source":["# **3. Select your paths and parameters**\n","\n","---\n","\n","The code below allows the user to enter the paths to where the training data is and to define the training parameters.\n","\n","If your dataset is large, this step can take a while. \n","\n","**Note:** The BG class reported by MaskRCNN stands for 'background'. By default BG is the default class in MaskRCNN, so even if your dataset contains only one class, MaskRCNN will treat the dataset as a two-class set.\n"]},{"cell_type":"markdown","metadata":{"id":"M5QFEW-HpRdQ"},"source":["## **3.1. Setting the main training parameters**\n","---\n",""]},{"cell_type":"markdown","metadata":{"id":"vdLRX63upWcB"},"source":[" **Paths for training, predictions and results**\n","\n","**`Training_source:`:** This is the path to your folder containing the subfolders *Training* and *Validation*, each containing images with their respective annotations. **If your files are not organised in this way, the notebook will NOT work. So make sure everything looks right!** To find the paths of the folders containing the respective datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below.\n","\n","**`model_name`:** Use only my_model -style, not my-model (Use \"_\" not \"-\"). Do not use spaces in the name. Avoid using the name of an existing model (saved in the same folder) as it will be overwritten. **Note that MaskRCNN will add a timestamp to your model_name in the form: model_name*YearMonthDayTHourMinute***\n","\n","**`model_path`**: Enter the path where your model will be saved once trained (for instance your result folder).\n","\n","**Training parameters**\n","\n","**`Training Depth`:** Here, you can choose how much you want to train the network. MaskRCNN is already pretrained on a large dataset which means its weights are already initialised. This means it may not be necessary to train the full model to reach satisfactory results on your dataset. To get the most out of the model, we recommend training the headlayers first for ca. 30 epochs, and then retraining the same model with an increasing depth for further 10s of epochs. To do this, use the same model_name in this section, with any other needed parameters and then load the desired weights file in section 3.3. **Default value: Head layers only**\n","\n","**`number_of_epochs`:**Enter the number of epochs the networks will be trained for. Note that if you want to continue training a previously trained model, enter the final number of epochs you want to use, i.e. if your previous model was trained for 50 epochs and you want to train it to 80, enter 80 epochs here, not 30.\n","**Default value: 50**\n","\n","**`detection_confidence`:** The network will assign scores of confidence to any predictions of ROIs it makes on the dataset during training. The detection confidence here indicates what threshold score you want to apply for the network to use accept any predicted ROIs. We recommend starting low here. If you notice your network is giving you too many ROIs, then increase this value gradually. **Default value: 0**\n","\n","**`learning_rate:`** Input the initial value to be used as learning rate. The learning rate will decrease after 7 epochs if the validation loss does not improve. 
**Default value: 0.003**\n","\n","\n"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"kajoWCX8ps4O"},"outputs":[],"source":["#@markdown ###Path to training images:\n","\n","Training_source = \"\" #@param {type:\"string\"}\n","\n","# Ground truth images\n","#Training_validation = \"\" #@param {type:\"string\"}\n","\n","# model name and path\n","##@markdown ###Name of the model and path to model folder:\n","model_name = \"\" #@param {type:\"string\"}\n","model_path = \"\" #@param {type:\"string\"}\n","\n","full_model_path = os.path.join(model_path,model_name)\n","# if os.path.exists(full_model_path):\n","# print(bcolors.WARNING+'Model folder already exists and will be overwritten.'+bcolors.NORMAL)\n","\n","# other parameters for training.\n","#@markdown ###Training Parameters\n","\n","Training_depth = \"3+ resnet layers\" #@param [\"Head_layers_only\", \"3+ resnet layers\", \"4+ resnet layers\", \"5+ resnet layers\", \"all layers\"]\n","##@markdown ###Advanced Parameters\n","\n","#Use_Default_Advanced_Parameters = True #@param {type:\"boolean\"}\n","##@markdown ###If not, please input:\n","\n","number_of_epochs = 10#@param {type:\"integer\"}\n","\n","batch_size = 4#@param{type:\"integer\"}\n","\n","image_resize_mode = \"none\"\n","\n","detection_confidence = 0 #@param {type:\"number\"}\n","\n","region_proposal_nms_threshold = 0.9 #@param{type:\"number\"}\n","\n","learning_rate = 0.003 #@param {type:\"number\"}\n","\n","#@markdown ###Loss weights\n","\n","region_proposal_class_loss = 1#@param {type:\"number\"}\n","region_proposal_class_loss = float(region_proposal_class_loss)\n","\n","region_proposal_bbox_loss = 1#@param {type:\"number\"}\n","region_proposal_bbox_loss = float(region_proposal_bbox_loss)\n","\n","mrcnn_class_loss = 1#@param {type:\"number\"}\n","mrcnn_class_loss = float(mrcnn_class_loss)\n","\n","mrcnn_bbox_loss = 1#@param {type:\"number\"}\n","mrcnn_bbox_loss = float(mrcnn_bbox_loss)\n","\n","mrcnn_mask_loss = 1#@param {type:\"number\"}\n","mrcnn_mask_loss = float(mrcnn_mask_loss)\n","\n","# Path to trained weights file\n","COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, \"mask_rcnn_coco.h5\")\n","\n","# Directory to save logs and model checkpoints, if not provided\n","# through the command line argument --logs\n","DEFAULT_LOGS_DIR = model_path\n","\n","dataset_train = ClassDataset()\n","dataset_train.load_image_csv(Training_source, \"Training\")\n","dataset_train.prepare()\n","\n","print(\"Class Count: {}\".format(dataset_train.num_classes))\n","for i, info in enumerate(dataset_train.class_info):\n"," print(\"{:3}. 
{:50}\".format(i, info['name']))\n","\n","############################################################\n","# Configurations\n","############################################################\n","\n","\n","class ClassConfig(Config):\n"," \"\"\"Configuration for training on the toy dataset.\n"," Derives from the base Config class and overrides some values.\n"," \"\"\"\n"," # Give the configuration a recognizable name\n"," NAME = model_name\n","\n"," # We use a GPU with 12GB memory, which can fit two images.\n"," # Adjust down if you use a smaller GPU.\n"," IMAGES_PER_GPU = batch_size\n","\n"," # Number of classes (including background)\n"," NUM_CLASSES = len(dataset_train.class_names) # Background + nucleus\n","\n"," # Number of training steps per epoch\n"," STEPS_PER_EPOCH = (len(os.listdir(Training_source+\"/Training\"))/2) // IMAGES_PER_GPU\n"," VALIDATION_STEPS = (len(os.listdir(Training_source+\"/Validation\"))/2) // IMAGES_PER_GPU\n","\n"," # Skip detections with < 90% confidence\n"," # DETECTION_MIN_CONFIDENCE = detection_confidence\n","\n"," LEARNING_RATE = learning_rate\n","\n"," DETECTION_MIN_CONFIDENCE = 0\n","\n"," # Backbone network architecture\n"," # Supported values are: resnet50, resnet101\n"," BACKBONE = \"resnet101\"\n","\n"," # Input image resizing\n"," # Random crops of size 64x64\n"," IMAGE_RESIZE_MODE = image_resize_mode #\"crop\"\n"," IMAGE_MIN_DIM = 128\n"," IMAGE_MAX_DIM = 128\n"," IMAGE_MIN_SCALE = 2.0\n","\n"," # Length of square anchor side in pixels\n"," RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)\n","\n"," # ROIs kept after non-maximum supression (training and inference)\n"," POST_NMS_ROIS_TRAINING = 2000\n"," POST_NMS_ROIS_INFERENCE = 4000\n","\n"," # Non-max suppression threshold to filter RPN proposals.\n"," # You can increase this during training to generate more propsals.\n"," RPN_NMS_THRESHOLD = region_proposal_nms_threshold\n","\n"," # How many anchors per image to use for RPN training\n"," RPN_TRAIN_ANCHORS_PER_IMAGE = 128\n","\n"," # Image mean (RGB)\n"," MEAN_PIXEL = np.array([43.53, 39.56, 48.22])\n","\n"," # If enabled, resizes instance masks to a smaller size to reduce\n"," # memory load. Recommended when using high-resolution images.\n"," USE_MINI_MASK = False\n"," MINI_MASK_SHAPE = (56, 56) # (height, width) of the mini-mask\n","\n"," # Number of ROIs per image to feed to classifier/mask heads\n"," # The Mask RCNN paper uses 512 but often the RPN doesn't generate\n"," # enough positive proposals to fill this and keep a positive:negative\n"," # ratio of 1:3. 
You can increase the number of proposals by adjusting\n"," # the RPN NMS threshold.\n"," TRAIN_ROIS_PER_IMAGE = 128\n","\n"," # Maximum number of ground truth instances to use in one image\n"," MAX_GT_INSTANCES = 100\n","\n"," # Max number of final detections per image\n"," DETECTION_MAX_INSTANCES = 200\n","\n"," LOSS_WEIGHTS = {\n"," \"rpn_class_loss\": region_proposal_class_loss,\n"," \"rpn_bbox_loss\": region_proposal_bbox_loss,\n"," \"mrcnn_class_loss\": mrcnn_class_loss,\n"," \"mrcnn_bbox_loss\": mrcnn_bbox_loss,\n"," \"mrcnn_mask_loss\": mrcnn_mask_loss\n"," }\n","\n","if Training_depth == \"Head_layers_only\":\n"," layers = \"heads\"\n","elif Training_depth == \"3+ resnet layers\":\n"," layers = \"3+\"\n","elif Training_depth == \"4+ resnet layers\":\n"," layers = \"4+\"\n","elif Training_depth == \"5+ resnet layers\":\n"," layers = \"5+\"\n","else:\n"," layers = \"all\"\n","\n","config = ClassConfig()\n","# Training dataset\n","# dataset_train = ClassDataset()\n","# num_classes = dataset_train.load_image_csv(Training_source, \"Training\")\n","# dataset_train.prepare()\n","# print(\"Class Count: {}\".format(dataset_train.num_classes))\n","# for i, info in enumerate(dataset_train.class_info):\n","# print(\"{:3}. {:50}\".format(i, info['name']))\n","\n","# Load and display random samples\n","image_ids = np.random.choice(dataset_train.image_ids, 1)\n","for image_id in image_ids:\n"," image = dataset_train.load_image(image_id)\n"," mask, class_ids = dataset_train.load_mask(image_id)\n"," visualize.display_top_masks(image, mask, class_ids, dataset_train.class_names, limit=dataset_train.num_classes-1)\n","\n","# plt.savefig(base_path + '/TrainingDataExample_MaskRCNN.png',bbox_inches='tight',pad_inches=0)\n","\n","# image, image_meta, class_ids, bbox, mask = modellib.load_image_gt(\n","# dataset_train, config, image_id, use_mini_mask=False)\n","\n","# visualize.display_instances(image, bbox, mask, class_ids, dataset_train.class_names,\n","# show_bbox=False)\n","model = modellib.MaskRCNN(mode=\"training\", config=config, model_dir=DEFAULT_LOGS_DIR)\n","config.display()\n","Use_pretrained_model = False\n","Use_Data_augmentation = False"]},{"cell_type":"markdown","metadata":{"id":"PzWJwWFGlYZi"},"source":["##**3.2. Data augmentation**\n","\n","---\n","\n"," Data augmentation can improve training progress by amplifying differences in the dataset. This can be useful if the available dataset is small since, in this case, it is possible that a network could quickly learn every example in the dataset (overfitting), without augmentation. Augmentation is not necessary for training and if the dataset the `Use_Data_Augmentation` box can be unticked.\n","\n"," If the box is ticked a simple augmentation of horizontal and vertical flipping will be applied to the dataset."]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"d0BwRHRElaSD"},"outputs":[],"source":["#@markdown ##**Augmentation Options**\n","\n","Use_Data_augmentation = True #@param {type:\"boolean\"}\n","\n","if Use_Data_augmentation == True:\n"," # Number of training steps per epoch\n"," class AugClassConfig(ClassConfig):\n"," STEPS_PER_EPOCH = 10*((len(os.listdir(Training_source+\"/Training\"))/2) // batch_size)\n"," VALIDATION_STEPS = 10*((len(os.listdir(Training_source+\"/Validation\"))/2) // batch_size)\n"," \n","if Use_Data_augmentation:\n"," config = AugClassConfig()"]},{"cell_type":"markdown","metadata":{"id":"uJjmzKGHk_p9"},"source":["\n","## **3.3. 
Using weights from a pre-trained model as initial weights**\n","---\n"," Here, you can set the the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a MaskRCNN model**. \n","\n"," This option allows you to perform training over multiple Colab runtimes or to do transfer learning using models trained outside of ZeroCostDL4Mic. **You do not need to run this section if you want to train a network from scratch**."]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"3JsrRmNbgNeL"},"outputs":[],"source":["# @markdown ##Loading weights from a pre-trained network\n","\n","Use_pretrained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If yes, please provide the path to the model (this path should end with the file extension .h5):\n","pretrained_model_path = \"\" #@param {type:\"string\"}\n","\n","if Use_Data_augmentation == True:\n"," config = AugClassConfig()\n","else:\n"," config = ClassConfig()\n","\n","model = modellib.MaskRCNN(mode=\"training\", config=config, model_dir=DEFAULT_LOGS_DIR)\n","model.load_weights(pretrained_model_path, by_name=True)"]},{"cell_type":"markdown","metadata":{"id":"rTWfoQEPuPad"},"source":["#**4. Train the network**\n","---"]},{"cell_type":"markdown","metadata":{"id":"CRPOHMNSo0Sj"},"source":["## **4.1. Train the network**\n","---\n","When playing the cell below you should see updates after each epoch (round). Network training can take some time.\n","\n","* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches."]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"Li__jcfsTzs6"},"outputs":[],"source":["#@markdown ##Start training\n","\n","pdf_export(config, augmentation = Use_Data_augmentation, pretrained_model=Use_pretrained_model)\n","\n","if os.path.exists(model.log_dir+\"/Quality Control\"):\n"," shutil.rmtree(model.log_dir+\"/Quality Control\")\n","os.makedirs(model.log_dir+\"/Quality Control\")\n","\n","start = time.time()\n","#Here, we start the model training\n","train_csv(model, Training_source, augmentation=Use_Data_augmentation, epochs = number_of_epochs, layers = layers)\n","dt = time.time() - start\n","mins, sec = divmod(dt, 60) \n","hour, mins = divmod(mins, 60) \n","print(\"Time elapsed:\",hour, \"hour(s)\",mins,\"min(s)\",round(sec),\"sec(s)\")\n","\n","new_model_name = os.path.basename(model.log_dir)\n","#Here, we just save some interesting parameters from training as a csv file\n","if not os.path.exists(model_path+'/'+new_model_name+'/Quality Control/class_names.csv'):\n"," with open(model_path+'/'+new_model_name+'/Quality Control/class_names.csv','w') as class_count_csv:\n"," class_writer = csv.writer(class_count_csv)\n"," for class_name in dataset_train.class_names:\n"," class_writer.writerow([class_name])\n","\n","if os.path.exists(model_path+'/'+new_model_name+'/Quality Control/training_evaluation.csv'):\n"," with open(model_path+'/'+new_model_name+'/Quality Control/training_evaluation.csv','a') as csvfile:\n"," writer = csv.writer(csvfile)\n"," #print('hello')\n"," #writer.writerow(['epoch','loss','val_loss','learning rate'])\n"," model_starting_checkpoint = int(pretrained_model_path[-7:-3])\n"," for i in range(len(model.keras_model.history.history['loss'])):\n"," 
writer.writerow([str(model_starting_checkpoint+i),model.keras_model.history.history['loss'][i], str(learning_rate)])\n","else:\n"," with open(model_path+'/'+new_model_name+'/Quality Control/training_evaluation.csv','w') as csvfile:\n"," writer = csv.writer(csvfile)\n"," writer.writerow(['epoch','loss','val_loss','learning rate'])\n"," for i in range(len(model.keras_model.history.history['loss'])):\n"," writer.writerow([str(i+1),model.keras_model.history.history['loss'][i], model.keras_model.history.history['val_loss'][i], str(learning_rate)])\n","\n","pdf_export(config, trained=True, augmentation = Use_Data_augmentation, pretrained_model = Use_pretrained_model)"]},{"cell_type":"markdown","metadata":{"id":"n0-RUNbruHa6"},"source":["# **5. Evaluate your model**\n","---\n","\n","This section allows the user to perform important quality checks on the validity and generalisability of the trained model. \n","\n","**We highly recommend to perform quality control on all newly trained models.**\n","\n"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"b10mT10YtngQ"},"outputs":[],"source":["#@markdown ###Do you want to assess the model you just trained ?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, please provide the name of the model folder:\n","\n","QC_model_folder = \"\" #@param {type:\"string\"}\n","\n","if (Use_the_current_trained_model): \n"," QC_model_folder = model_path+'/'+new_model_name\n","\n","QC_model_name = os.path.basename(QC_model_folder)\n","\n","if os.path.exists(QC_model_folder):\n"," print(\"The \"+QC_model_name+\" model will be evaluated\")\n","else:\n"," W = '\\033[0m' # white (normal)\n"," R = '\\033[31m' # red\n"," print(R+'!! WARNING: The chosen model does not exist !!'+W)\n"," print('Please make sure you provide a valid model path before proceeding further.')"]},{"cell_type":"markdown","metadata":{"id":"xOOXTMHkLqYq"},"source":["## **5.1. Inspection of the loss function**\n","---\n","\n","First, it is good practice to evaluate the training progress by comparing the training loss with the validation loss. The latter is a metric which shows how well the network performs on a subset of unseen data which is set aside from the training dataset. For more information on this, see for example [this review](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6381354/) by Nichols *et al.*\n","\n","**Training loss** describes an error value after each epoch for the difference between the model's prediction and its ground-truth target.\n","\n","**Validation loss** describes the same error value between the model's prediction on a validation image and compared to it's target.\n","\n","During training both values should decrease before reaching a minimal value which does not decrease further even after more training. Comparing the development of the validation loss with the training loss can give insights into the model's performance.\n","\n","Decreasing **Training loss** and **Validation loss** indicates that training is still necessary and increasing the `number_of_epochs` is recommended. Note that the curves can look flat towards the right side, just because of the y-axis scaling. The network has reached convergence once the curves flatten out. After this point no further training is required. If the **Validation loss** suddenly increases again an the **Training loss** simultaneously goes towards zero, it means that the network is overfitting to the training data. 
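Overfitting of this kind is easiest to spot by plotting both curves together. Besides tensorboard, the losses logged to `Quality Control/training_evaluation.csv` in section 4.1 (columns `epoch`, `loss`, `val_loss`, `learning rate`) can be plotted directly; a minimal sketch, assuming `QC_model_folder` already points at a trained model folder containing that file:

```python
import os
import pandas as pd
import matplotlib.pyplot as plt

# Plot the losses logged by the training cell (section 4.1) without tensorboard.
# Assumes QC_model_folder points at a trained model folder containing the CSV.
csv_path = os.path.join(QC_model_folder, "Quality Control", "training_evaluation.csv")

history = pd.read_csv(csv_path)
plt.plot(history["epoch"], history["loss"], label="training loss")
if "val_loss" in history.columns:
    plt.plot(history["epoch"], history["val_loss"], label="validation loss")
plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend()
plt.show()
```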
In other words the network is remembering the exact patterns from the training data and no longer generalizes well to unseen data. In this case the training dataset has to be increased.\n","\n","In this notebook, the training loss curves are plotted using **tensorboard**. However, all the training results are also logged in a csv file in your model folder."]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"-BpIBHDiOTqK"},"outputs":[],"source":["#@markdown ##Play the cell to show a plot of training errors vs. epoch number\n","\n","if os.path.exists(QC_model_folder):\n"," os.chdir(QC_model_folder)\n"," %load_ext tensorboard\n"," %tensorboard --logdir \"$QC_model_folder\"\n","else:\n"," print(\"The chosen model or path does not exist. Check if your model_name was saved with a timestamp.\")"]},{"cell_type":"markdown","metadata":{"id":"PdJFjEXRKApD"},"source":["## **5.2. Error mapping and quality metrics estimation**\n","---\n","\n","This section will display an overlay of the input images ground-truth (solid lines) and predicted boxes (dashed lines). Additionally, the below cell will show the mAP value of the model on the QC data together with plots of the Precision-Recall curves for all the classes in the dataset. If you want to read in more detail about these scores, we recommend [this brief explanation](https://medium.com/@jonathan_hui/map-mean-average-precision-for-object-detection-45c121a31173).\n","\n"," In a nutshell:\n","\n","**Precision:** This is the proportion of the correct classifications (true positives) in all the predictions made by the model.\n","\n","**Recall:** This is the proportion of the detected true positives in all the detectable data.\n","\n"," The files provided in the \"QC_data_folder\" should be under a subfolder called validation which contains the images (e.g. as .jpg) and annotations (.csv files)!"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"8yhm7a3gAFdK"},"outputs":[],"source":["#@markdown ### Provide the path to your quality control dataset.\n","DEFAULT_LOGS_DIR = base_path + \"/gdrive/MyDrive\"\n","QC_data_folder = \"\" #@param {type:\"string\"}\n","#Result_folder = \"\" #@param {type:\"string\"}\n","\n","# model name and path\n","\n","#Use_the_current_trained_model = False #@param {type:\"boolean\"}\n","\n","#@markdown #####During training, the model files are automatically saved inside a folder named after model_name in section 3. 
Provide the path to this folder below.\n","#QC_model_folder = base_path + \"/gdrive/MyDrive/maskrcnn_nucleus20210202T1206\" #@param {type:\"string\"}\n","\n","#@markdown ###Choose the checkpoint you want to evauluate:\n","Checkpoint = 8#@param {type:\"integer\"}\n","\n","#Load the dataset\n","dataset_val = ClassDataset()\n","dataset_val.load_image_csv(QC_data_folder, \"Validation\")\n","dataset_val.prepare()\n","\n","# Activate the (pre-)trained model\n","\n","detection_min_confidence = 0.35 #@param{type:\"number\"}\n","region_proposal_nms_threshold = 0.99 #@param{type:\"number\"}\n","resize_mode = \"none\" #@param[\"none\",\"square\",\"crop\",\"pad64\"]\n","\n","class InferenceConfig(ClassConfig):\n"," IMAGE_RESIZE_MODE = resize_mode\n"," RPN_NMS_THRESHOLD = region_proposal_nms_threshold\n"," NAME = \"nucleus\"\n"," IMAGES_PER_GPU = 1\n"," # Number of classes (including background)\n"," DETECTION_MIN_CONFIDENCE = detection_min_confidence\n"," NUM_CLASSES = len(dataset_val.class_names) # Background + nucleus\n"," RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)\n"," POST_NMS_ROIS_INFERENCE = 15000\n","inference_config = InferenceConfig()\n","\n","# Recreate the model in inference mode\n","#if Use_the_current_trained_model:\n","model = modellib.MaskRCNN(mode=\"inference\", \n"," config=inference_config,\n"," model_dir=QC_model_folder)\n","# else:\n","# model = modellib.MaskRCNN(mode=\"inference\", \n","# config=inference_config,\n","# model_dir=QC_model_folder)\n","\n","# Get path to saved weights\n","if Checkpoint < 10:\n"," qc_model_path = QC_model_folder+\"/mask_rcnn_\"+QC_model_name[:-13]+\"_000\"+str(Checkpoint)+\".h5\"\n","elif Checkpoint < 100:\n"," qc_model_path = QC_model_folder+\"/mask_rcnn_\"+QC_model_name[:-13]+\"_00\"+str(Checkpoint)+\".h5\"\n","elif Checkpoint < 1000:\n"," qc_model_path = QC_model_folder+\"/mask_rcnn_\"+QC_model_name[:-13]+\"_0\"+str(Checkpoint)+\".h5\"\n","\n","# Load trained weights\n","print(\"Loading weights from \", qc_model_path)\n","model.load_weights(qc_model_path, by_name=True)\n","\n","# dataset_val = ClassDataset()\n","# num_classes = dataset_val.load_image_csv(QC_data_folder, \"Validation\")\n","# dataset_val.prepare()\n","\n","image_id = random.choice(dataset_val.image_ids)\n","original_image, image_meta, gt_class_id, gt_bbox, gt_mask =\\\n"," modellib.load_image_gt(dataset_val, inference_config, \n"," image_id, use_mini_mask=False)\n","\n","results = model.detect([original_image], verbose=1)\n","r = results[0]\n","visualize.display_differences(original_image, gt_bbox, gt_class_id, gt_mask, r['rois'], r['class_ids'], r['scores'], r['masks'], dataset_val.class_names, iou_threshold = 0.8, score_threshold= 0.8)\n","# visualize.display_instances(original_image, gt_bbox, gt_mask, gt_class_id, \n","# dataset_val.class_names, figsize=(8, 8))\n","\n","save_image(original_image, \"QC_example_data.png\", r['rois'], r['masks'],\n"," r['class_ids'],r['scores'],dataset_val.class_names,\n"," scores_thresh=0,mode=0,save_dir=QC_model_folder+'/Quality Control')"]},{"cell_type":"markdown","metadata":{"id":"IuXEDjWAK6pO"},"source":["##**5.3. 
Precision-Recall Curve**\n","\n"," The p-r curve can give a quantification how well the model\n","Since the training saves model checkpoints for each epoch, you should choose which one you want to use for quality control in the `Checkpoint` box."]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"lzoGZUoCxpSc"},"outputs":[],"source":["#@markdown ###Show the precision-recall curve of the QC data\n","#@markdown Choose an IoU threshold for the p-r plot (between 0 and 1), ignore that the plot title says AP@50:\n","\n","iou_threshold = 0.3 #@param{type:\"number\"}\n","mAP, precisions, recalls, overlaps = utils.compute_ap(gt_bbox, gt_class_id, gt_mask,\n"," r['rois'], r['class_ids'], r['scores'], r['masks'],\n"," iou_threshold=iou_threshold)\n","visualize.plot_precision_recall(mAP, precisions, recalls)\n","plt.savefig(QC_model_folder+'/Quality Control/P-R_curve_'+QC_model_name+'.png',bbox_inches='tight',pad_inches=0)\n","\n","gt_match, pred_match, overlaps = utils.compute_matches(gt_bbox, gt_class_id, gt_mask, r['rois'], r['class_ids'], r['scores'], r['masks'])\n","\n","#TO DO: Implement for multiclasses\n","if len(dataset_val.class_names) == 2:\n"," with open (QC_model_folder+'/Quality Control/QC_results.csv','w') as csvfile:\n"," writer = csv.writer(csvfile)\n"," writer.writerow(['class','gt instances','True positives','False Negatives', 'IoU threshold', 'mAP'])\n"," for index in dataset_val.class_names:\n"," if index != 'BG':\n"," writer.writerow([index, str(len(gt_match)), str(len(pred_match)), str(len(gt_match)-len(pred_match)), str(iou_threshold), str(mAP)])\n"," qc_pdf_export()\n","else:\n"," print('Your dataset has more than one class. This means certain features may not be enabled. We are working on implementing this section fully for multiple classes.')"]},{"cell_type":"markdown","metadata":{"id":"MGBi1lB2vSOr"},"source":["# **6. Using the trained model**\n","\n","---\n","\n","In this section the unseen data is processed using the trained model (in section 4). First, your unseen images are uploaded and prepared for prediction. After that your trained model from section 4 is activated and finally saved into your Google Drive."]},{"cell_type":"markdown","metadata":{"id":"HrQPXU0DvWIT"},"source":["## **6.1. Generate prediction(s) from unseen dataset**\n","---\n","\n","The current trained model (from section 4.2) can now be used to process images. If you want to use an older model, untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. 
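The model folders written in section 4 contain one weights file per epoch, and the prediction cell below rebuilds the file name from the `Checkpoint` number. A minimal sketch of that naming scheme (`mask_rcnn_<model>_<4-digit epoch>.h5`, with the 13-character timestamp stripped from the folder name), using a hypothetical folder path:

```python
import os

# Hypothetical model folder written by section 4 (the name ends in a
# YearMonthDayTHourMinute timestamp); adapt the path to your own run.
model_folder = "/path/to/my_model20230101T0101"
model_stub = os.path.basename(model_folder)[:-13]  # drop the timestamp suffix

checkpoint = 8
weights_file = os.path.join(
    model_folder, "mask_rcnn_{}_{:04d}.h5".format(model_stub, checkpoint))
print("Expected weights file:", weights_file)

# Listing the .h5 files in the folder shows which checkpoints are available.
if os.path.isdir(model_folder):
    print(sorted(f for f in os.listdir(model_folder) if f.endswith(".h5")))
```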
Predicted output images are saved in your **Result_folder** folder as restored image stacks (ImageJ-compatible TIFF images).\n","\n","**`Data_folder`:** This folder should contain the images that you want to use your trained network on for processing.\n","\n","**`Result_folder`:** This folder will contain the predicted output images."]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"7FttSetXvdTB"},"outputs":[],"source":["#@markdown ### Provide the path to your dataset and to the folder where the predictions are saved, then play the cell to predict outputs from your unseen images.\n","DEFAULT_LOGS_DIR = base_path + \"/gdrive/MyDrive\"\n","Data_folder = \"\" #@param {type:\"string\"}\n","Result_folder = \"\" #@param {type:\"string\"}\n","\n","# model name and path\n","#@markdown ###Do you want to use the current trained model?\n","Use_the_current_trained_model = True #@param {type:\"boolean\"}\n","\n","#@markdown ###If not, provide the name of the model and path to model folder:\n","Prediction_model_folder = \"\" #@param {type:\"string\"}\n","\n","if Use_the_current_trained_model:\n"," Prediction_model_folder = model_path+'/'+new_model_name\n","\n","#@markdown ###Choose the checkpoint you want to evaluate:\n","Checkpoint = 8#@param {type:\"integer\"}\n","\n","if os.path.exists(Prediction_model_folder+'/Quality Control/class_names.csv'):\n"," print('Prediction classes detected! The model will predict the following classes:')\n"," class_names = []\n"," with open(Prediction_model_folder+'/Quality Control/class_names.csv', 'r') as class_names_csv:\n"," csvreader = csv.reader(class_names_csv)\n"," for row in csvreader:\n"," print(row[0])\n"," class_names.append(row[0])\n","\n","\n","detection_min_confidence = 0.1 #@param{type:\"number\"}\n","region_proposal_nms_threshold = 0.99 #@param{type:\"number\"}\n","resize_mode = \"none\" #@param[\"none\",\"square\",\"crop\",\"pad64\"]\n","post_nms_rois = 10000 #@param{type:\"integer\"}\n","\n","\n","#Load the dataset\n","dataset_val = ClassDataset()\n","dataset_val.load_image_csv(Data_folder, \"Validation\")\n","dataset_val.prepare()\n","\n"," # Activate the (pre-)trained model\n","class InferenceConfig(ClassConfig):\n"," IMAGE_RESIZE_MODE = resize_mode\n"," IMAGE_MIN_DIM = 128\n"," IMAGE_MAX_DIM = 128\n"," IMAGE_MIN_SCALE = 2.0\n"," RPN_NMS_THRESHOLD = region_proposal_nms_threshold\n"," #DETECTION_NMS_THRESHOLD = 0.0\n"," NAME = \"nucleus\"\n"," IMAGES_PER_GPU = 1\n"," # Number of classes (including background)\n"," DETECTION_MIN_CONFIDENCE = detection_min_confidence\n"," NUM_CLASSES = len(dataset_val.class_names) # Background + nucleus\n"," RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)\n"," POST_NMS_ROIS_INFERENCE = post_nms_rois\n","\n","inference_config = InferenceConfig()\n","\n","# Recreate the model in inference mode\n","model = modellib.MaskRCNN(mode=\"inference\", \n"," config=inference_config,\n"," model_dir=Prediction_model_folder)\n","\n","# Get path to saved weights\n","if Checkpoint < 10:\n"," pred_model_path = Prediction_model_folder+\"/mask_rcnn_\"+os.path.basename(Prediction_model_folder[:-13])+\"_000\"+str(Checkpoint)+\".h5\"\n","elif Checkpoint < 100:\n"," pred_model_path = Prediction_model_folder+\"/mask_rcnn_\"+os.path.basename(Prediction_model_folder[:-13])+\"_00\"+str(Checkpoint)+\".h5\"\n","elif Checkpoint < 1000:\n"," pred_model_path = Prediction_model_folder+\"/mask_rcnn_\"+os.path.basename(Prediction_model_folder[:-13])+\"_0\"+str(Checkpoint)+\".h5\"\n","\n","# Load trained weights\n","print(\"Loading 
weights from \", pred_model_path)\n","model.load_weights(pred_model_path, by_name=True)\n","\n","#@markdown ###Choose how you would like to export the predictions:\n","Export_mode = \"image with class_name,score and mask\" #@param[\"image with bbox, class_name, scores, masks\",\"image with bbox,class_name and score\",\"image with class_name,score and mask\",\"mask with black background\"]\n","if Export_mode == \"image with bbox, class_name, scores, masks\":\n"," export_mode = 0\n","elif Export_mode == \"image with bbox,class_name and score\":\n"," export_mode = 1\n","elif Export_mode == \"image with class_name,score and mask\":\n"," export_mode = 2\n","elif Export_mode == \"mask with black background\":\n"," export_mode = 3\n","\n","\n","file_path = os.path.join(Data_folder, 'Validation')\n","for input in os.listdir(file_path):\n"," if input.endswith('.png'):\n"," image = io.imread(os.path.join(file_path,input))\n"," results = model.detect([image], verbose=0)\n"," r = results[0]\n"," save_image(image, \"predicted_\"+input, r['rois'], r['masks'],\n"," r['class_ids'],r['scores'],class_names,\n"," scores_thresh=0,mode=export_mode,save_dir=Result_folder)\n"]},{"cell_type":"markdown","metadata":{"id":"Yu4OGubv59qa"},"source":["## **6.2. Inspect the predicted output**\n","---\n"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"YnWgQZmlIuv9"},"outputs":[],"source":["#@markdown ##Run this cell to display a randomly chosen input with predicted mask.\n","\n","detection_min_confidence = 0.1 #@param{type:\"number\"}\n","region_proposal_nms_threshold = 0.99 #@param{type:\"number\"}\n","resize_mode = \"none\" #@param[\"none\",\"square\",\"crop\",\"pad64\"]\n","post_nms_rois = 10000 #@param{type:\"integer\"}\n","\n"," # Activate the (pre-)trained model\n","class InferenceConfig(ClassConfig):\n"," IMAGE_RESIZE_MODE = resize_mode\n"," IMAGE_MIN_DIM = 128\n"," IMAGE_MAX_DIM = 128\n"," IMAGE_MIN_SCALE = 2.0\n"," RPN_NMS_THRESHOLD = region_proposal_nms_threshold\n"," #DETECTION_NMS_THRESHOLD = 0.0\n"," NAME = \"nucleus\"\n"," IMAGES_PER_GPU = 1\n"," # Number of classes (including background)\n"," DETECTION_MIN_CONFIDENCE = detection_min_confidence\n"," NUM_CLASSES = len(dataset_val.class_names) # Background + nucleus\n"," RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)\n"," POST_NMS_ROIS_INFERENCE = post_nms_rois\n","\n","inference_config = InferenceConfig()\n","\n","\n","model = modellib.MaskRCNN(mode=\"inference\", \n"," config=inference_config,\n"," model_dir=Prediction_model_folder)\n","\n","model.load_weights(pred_model_path, by_name=True)\n","example_image = random.choice(os.listdir(os.path.join(Data_folder,'Validation')))\n","\n","if example_image.endswith('.csv'):\n"," example_image = example_image[:-4]\n","\n","display_image = io.imread(file_path+'/'+example_image)\n","results = model.detect([display_image], verbose=0)\n","\n","r = results[0]\n","\n","visualize.display_instances(display_image, r['rois'], r['masks'], r['class_ids'], \n"," class_names, r['scores'], ax=get_ax())"]},{"cell_type":"markdown","metadata":{"id":"BrosGM4Z50gX"},"source":["## **6.3. Download your predictions**\n","---\n","\n","**Store your data** and ALL its results elsewhere by downloading it from Google Drive and after that clean the original folder tree (datasets, results, trained model etc.) if you plan to train or use new networks. 
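One way to do this from inside the notebook is to pack the results into a single archive and trigger a browser download. This is only a minimal sketch, assuming the session is running in Colab and that `Result_folder` from section 6.1 is still defined:

```python
import shutil
from google.colab import files

# Zip the prediction folder next to itself, then download it (Colab only).
archive = shutil.make_archive(Result_folder.rstrip("/") + "_predictions", "zip", Result_folder)
files.download(archive)
```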
Please note that the notebook will otherwise **OVERWRITE** all files which have the same name."]},{"cell_type":"markdown","metadata":{"id":"JYfEsBazHhkW"},"source":["# **7. Version log**\n","---\n","**v1.13**: \n","\n","\n","* This notebook is new as ZeroCostDL4Mic version 1.13. and is currently a beta version. \n","* Further edits to this notebook in future versions will be updated in this cell."]},{"cell_type":"markdown","metadata":{"id":"F3zreN5K5S2S"},"source":["#**Thank you for using MaskRCNN**!"]}],"metadata":{"accelerator":"GPU","colab":{"collapsed_sections":["YrTo6T74i7s0","RZL8pqcEi0KY","3yywetML0lUX","F3zreN5K5S2S"],"name":"MaskRCNN_ZeroCostDL4Mic.ipynb","provenance":[]},"kernelspec":{"display_name":"Python 3","name":"python3"}},"nbformat":4,"nbformat_minor":0} diff --git a/Colab_notebooks/RetinaNet_ZeroCostDL4Mic.ipynb b/Colab_notebooks/RetinaNet_ZeroCostDL4Mic.ipynb index 41377dc8..75618186 100644 --- a/Colab_notebooks/RetinaNet_ZeroCostDL4Mic.ipynb +++ b/Colab_notebooks/RetinaNet_ZeroCostDL4Mic.ipynb @@ -184,6 +184,8 @@ "from colorama import Fore, Back, Style\n", "from sklearn.metrics import average_precision_score\n", "\n", + "#Create a variable to get and store relative base path\n", + "base_path = os.getcwd()\n", "\n", "from keras import backend as K\n", "import xml.etree.ElementTree as ET\n", @@ -226,7 +228,7 @@ "%cp object_detection/packages/tf2/setup.py .\n", "!python -m pip install .\n", "\n", - "%cd /content/\n", + "%cd \"${base_path}\"\n", "\n", "\n", "import pathlib\n", @@ -726,35 +728,35 @@ " save_boxes_names.append(category_index[boxes_labels[i]+1]['name'])\n", " \n", " #This file will be for later analysis of the bounding boxes in imagej\n", - " if not os.path.exists('/content/predicted_bounding_boxes.csv'):\n", - " with open('/content/predicted_bounding_boxes.csv', 'w', newline='') as csvfile:\n", + " if not os.path.exists(base_path + '/predicted_bounding_boxes.csv'):\n", + " with open(base_path + '/predicted_bounding_boxes.csv', 'w', newline='') as csvfile:\n", " csvwriter = csv.writer(csvfile, delimiter=',')\n", " specs_list = ['filename']+['xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class']*len(boxes)\n", " csvwriter.writerow(specs_list)\n", " csvwriter.writerow(save_boxes)\n", " else:\n", - " with open('/content/predicted_bounding_boxes.csv', 'a+', newline='') as csvfile:\n", + " with open(base_path + '/predicted_bounding_boxes.csv', 'a+', newline='') as csvfile:\n", " csvwriter = csv.writer(csvfile)\n", " csvwriter.writerow(save_boxes)\n", " \n", - " if not os.path.exists('/content/predicted_bounding_boxes_names.csv'):\n", - " with open('/content/predicted_bounding_boxes_names.csv', 'w', newline='') as csvfile_names:\n", + " if not os.path.exists(base_path + '/predicted_bounding_boxes_names.csv'):\n", + " with open(base_path + '/predicted_bounding_boxes_names.csv', 'w', newline='') as csvfile_names:\n", " csvwriter = csv.writer(csvfile_names, delimiter=',')\n", " specs_list = ['filename']+['xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class']*len(boxes)\n", " csvwriter.writerow(specs_list)\n", " csvwriter.writerow(save_boxes_names)\n", " else:\n", - " with open('/content/predicted_bounding_boxes_names.csv', 'a+', newline='') as csvfile_names:\n", + " with open(base_path + '/predicted_bounding_boxes_names.csv', 'a+', newline='') as csvfile_names:\n", " csvwriter = csv.writer(csvfile_names)\n", " csvwriter.writerow(save_boxes_names)\n", " # #This file is to create a nicer display for the output images\n", - " # if not 
os.path.exists('/content/predicted_bounding_boxes_display.csv'):\n",
- " # with open('/content/predicted_bounding_boxes_display.csv', 'w', newline='') as csvfile_new:\n",
+ " # if not os.path.exists(base_path + '/predicted_bounding_boxes_display.csv'):\n",
+ " # with open(base_path + '/predicted_bounding_boxes_display.csv', 'w', newline='') as csvfile_new:\n",
 " # csvwriter2 = csv.writer(csvfile_new, delimiter=',')\n",
 " # specs_list = ['filename','width','height','class','xmin','ymin','xmax','ymax']\n",
 " # csvwriter2.writerow(specs_list)\n",
 " # else:\n",
- " # with open('/content/predicted_bounding_boxes_display.csv','a+',newline='') as csvfile_new:\n",
+ " # with open(base_path + '/predicted_bounding_boxes_display.csv','a+',newline='') as csvfile_new:\n",
 " # csvwriter2 = csv.writer(csvfile_new)\n",
 " # for box in boxes:\n",
 " # row = [os.path.basename(image_path),image_w,image_h,box.get_label(),int(box.xmin*image_w),int(box.ymin*image_h),int(box.xmax*image_w),int(box.ymax*image_h)]\n",
@@ -1378,8 +1380,8 @@
 " if visualise_example == True:\n",
 " pdf.cell(60, 5, txt = 'Example ground-truth annotation', ln=1)\n",
 " pdf.ln(1)\n",
- " exp_size = io.imread('/content/TrainingDataExample_RetinaNet.png').shape\n",
- " pdf.image('/content/TrainingDataExample_RetinaNet.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))\n",
+ " exp_size = io.imread(base_path + '/TrainingDataExample_RetinaNet.png').shape\n",
+ " pdf.image(base_path + '/TrainingDataExample_RetinaNet.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))\n",
 " pdf.ln(1)\n",
 " ref_1 = 'References:\\n - ZeroCostDL4Mic: von Chamier, Lucas & Laine, Romain, et al. \"ZeroCostDL4Mic: an open platform to simplify access and use of Deep-Learning in Microscopy.\" bioRxiv (2020).'\n",
 " pdf.multi_cell(190, 5, txt = ref_1, align='L')\n",
@@ -1667,7 +1669,7 @@
 "\n",
 "# mount user's Google Drive to Google Colab.\n",
 "from google.colab import drive\n",
- "drive.mount('/content/gdrive')"
+ "drive.mount(base_path + '/gdrive')"
 ]
 },
 {
@@ -1705,8 +1707,8 @@
 "outputs": [],
 "source": [
 "#@markdown ###Download an already annotated example dataset.\n",
- "!wget -nv --show-progress -A.zip https://zenodo.org/record/3941908//files/YoloV2%20dataset_v1.zip -O \"/content/example_dataset.zip\" \n",
- "!unzip -qq -o \"/content/example_dataset.zip\" -d \"/content/example_dataset\"\n",
+ "!wget -nv --show-progress -A.zip https://zenodo.org/record/3941908//files/YoloV2%20dataset_v1.zip -O \"$base_path/example_dataset.zip\" \n",
+ "!unzip -qq -o \"$base_path/example_dataset.zip\" -d \"$base_path/example_dataset\"\n",
 "!rm -rf \"example_dataset.zip\""
 ]
 },
@@ -1730,11 +1732,11 @@
 " **Paths for training, predictions and results**\n",
 "\n",
 "\n",
- "* **`Training_source:`, `Annotations`:** These are the paths to your folders containing the Training_source and the annotation data respectively. To find the paths of the folders containing the respective datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below. 
**Note**: If you are using the [example dataset](#example_dataset) downloaded in the previous cell, the **`Training_source`** field would be *`/content/example_dataset/Training_Images`* and the **`Annotations`** field would be *`/content/example_dataset/Training_Annotations`*.\n",
+ "* **`Training_source:`, `Annotations`:** These are the paths to your folders containing the Training_source and the annotation data respectively. To find the paths of the folders containing the respective datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below. **Note**: If you are using the [example dataset](#example_dataset) downloaded in the previous cell, the **`Training_source`** field would be *`base_path + /example_dataset/Training_Images`* and the **`Annotations`** field would be *`base_path + /example_dataset/Training_Annotations`*.\n",
 "\n",
 "* **`model_name`:** Enter here a name for your model (e.g.: `RetinaNet`). Use only my_model -style, not my-model (Use \"_\" not \"-\"). Do not use spaces in the name. Avoid using the name of an existing model (saved in the same folder) as it will be overwritten.\n",
 "\n",
- "* **`model_path`:** Enter the path where your model will be saved once trained (for instance your result folder). For instance, for saving the model trained in the root folder of the Google Drive account chose in the section 1 this field would be */content/gdrive/MyDrive*. \n",
+ "* **`model_path`:** Enter the path where your model will be saved once trained (for instance your result folder). For instance, for saving the model trained in the root folder of the Google Drive account chosen in section 1, this field would be *base_path + /gdrive/MyDrive*. \n",
 "\n",
 "**Training Parameters**\n",
 "\n",
@@ -2025,7 +2027,7 @@
 "\n",
 " # apply xml_to_csv() function to convert all XML files in images/ folder into labels.csv\n",
 " labels_df = xml_to_csv(Annotations)\n",
- " labels_df.to_csv(('/content/original_labels.csv'), index=None)\n",
+ " labels_df.to_csv((base_path + '/original_labels.csv'), index=None)\n",
 " \n",
 " # Apply flip augmentation\n",
 " aug = iaa.OneOf([ \n",
@@ -2060,7 +2062,7 @@
 " \n",
 " # Concat resized_images_df and augmented_images_df together and save in a new all_labels.csv file\n",
 " all_labels_df = pd.concat([labels_df, augmented_images_df])\n",
- " all_labels_df.to_csv('/content/combined_labels.csv', index=False)\n",
+ " all_labels_df.to_csv(base_path + '/combined_labels.csv', index=False)\n",
 "\n",
 " #Here we convert the new bounding boxes for the augmented images to PASCAL VOC .xml format\n",
 " def convert_to_xml(df,source,target_folder):\n",
@@ -2351,7 +2353,7 @@
 " download_weights(Weights_choice)\n",
 " print('Weights downloaded succesfully.')\n",
 " checkpoints_file = 'ckpt-0'\n",
- " pipeline_config = f'/content/models/research/object_detection/configs/tf2/{Weights_choice}.config'\n",
+ " pipeline_config = base_path + f'/models/research/object_detection/configs/tf2/{Weights_choice}.config'\n",
 "\n",
 "\n",
 "\n",
@@ -2467,7 +2469,7 @@
 "Use_the_current_trained_model = True #@param {type:\"boolean\"}\n",
 "\n",
 "# @markdown ###If not, please provide the name of the model folder:\n",
- "# @markdown ***Note:*** *The model folder is the path of an already trained model in some previous usage of this notebook. 
E.g.: `/content/gdrive/MyDrive/RetinaNet`*\n", + "# @markdown ***Note:*** *The model folder is the path of an already trained model in some previous usage of this notebook. E.g.: `base_path + /gdrive/MyDrive/RetinaNet`*\n", "\n", "QC_model_folder = \"\" #@param {type:\"string\"}\n", "if QC_model_folder and QC_model_folder[-1] == '/':\n", @@ -2578,7 +2580,7 @@ "\n", " The images provided in the \"Source_QC_folder\" and \"Target_QC_folder\" should contain images (e.g. as .jpg) and annotations (.xml files)!\n", "\n", - "For example, if you want to evaluate your model over the example dataset from [section 3](#example_dataset), the paths you should use are *`/content/example_dataset/Test_Images`* for the Source_QC_folder and *`/content/example_dataset/Test_Annotations`* for the Annotations_QC_folder.\n", + "For example, if you want to evaluate your model over the example dataset from [section 3](#example_dataset), the paths you should use are *`base_path + /example_dataset/Test_Images`* for the Source_QC_folder and *`base_path + /example_dataset/Test_Annotations`* for the Annotations_QC_folder.\n", "\n", "**mAP score:** This refers to the mean average precision of the model on the given dataset. This value gives an indication how precise the predictions of the classes on this dataset are when compared to the ground-truth. Values closer to 1 indicate a good fit.\n", "\n", @@ -2613,10 +2615,10 @@ "\n", "#Delete old csv with box predictions if one exists\n", "\n", - "if os.path.exists('/content/predicted_bounding_boxes.csv'):\n", - " os.remove('/content/predicted_bounding_boxes.csv')\n", - "if os.path.exists('/content/predicted_bounding_boxes_names.csv'):\n", - " os.remove('/content/predicted_bounding_boxes_names.csv')\n", + "if os.path.exists(base_path + '/predicted_bounding_boxes.csv'):\n", + " os.remove(base_path + '/predicted_bounding_boxes.csv')\n", + "if os.path.exists(base_path + '/predicted_bounding_boxes_names.csv'):\n", + " os.remove(base_path + '/predicted_bounding_boxes_names.csv')\n", "if os.path.exists(Source_QC_folder+'/.ipynb_checkpoints'):\n", " shutil.rmtree(Source_QC_folder+'/.ipynb_checkpoints')\n", "\n", @@ -2636,7 +2638,7 @@ "\n", "# #Make a csv file to read into imagej macro, to create custom bounding boxes\n", "header = ['filename']+['xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class']*max(n_objects)\n", - "with open('/content/predicted_bounding_boxes.csv', newline='') as inFile, open('/content/predicted_bounding_boxes_new.csv', 'w', newline='') as outfile:\n", + "with open(base_path + '/predicted_bounding_boxes.csv', newline='') as inFile, open(base_path + '/predicted_bounding_boxes_new.csv', 'w', newline='') as outfile:\n", " r = csv.reader(inFile)\n", " w = csv.writer(outfile)\n", " next(r, None) # skip the first row from the reader, the old header\n", @@ -2646,7 +2648,7 @@ " for row in r:\n", " w.writerow(row)\n", "\n", - "df_bbox=pd.read_csv('/content/predicted_bounding_boxes_new.csv',error_bad_lines=False)\n", + "df_bbox=pd.read_csv(base_path + '/predicted_bounding_boxes_new.csv',error_bad_lines=False)\n", "df_bbox=df_bbox.transpose()\n", "new_header = df_bbox.iloc[0] #grab the first row for the header\n", "df_bbox = df_bbox[1:] #take the data less the header row\n", @@ -2699,7 +2701,7 @@ "\n", "\n", "# --------------------------------------------------------------\n", - "add_header('/content/predicted_bounding_boxes_names.csv','/content/predicted_bounding_boxes_names_new.csv')\n", + "add_header(base_path + '/predicted_bounding_boxes_names.csv',base_path + 
'/predicted_bounding_boxes_names_new.csv')\n", "\n", "# # # This will display a randomly chosen dataset input and predicted output\n", "\n", @@ -2823,7 +2825,7 @@ "\n", "**`Result_folder`:** This folder will contain the predicted output images. In case the folder does not exist it will be created.\n", "\n", - "**`Prediction_model_path`:** This should be the folder that contains your model. The model folder is the path of an already trained model in some previous usage of this notebook. E.g.: *`/content/gdrive/MyDrive/RetinaNet`*\n", + "**`Prediction_model_path`:** This should be the folder that contains your model. The model folder is the path of an already trained model in some previous usage of this notebook. E.g.: *`base_path + /gdrive/MyDrive/RetinaNet`*\n", "\n", "**`Score_threshold`:** Insert the threshold of accuracy to the model, this is, the minimum percentage of confidence of the model in the predictions. Decimal number between 0 and 1. If the threshold is 1 there will be shown only the predictions with 100% of confidence (difficult) and, otherwise, if the threshold is 0 there will be shown all the predictions. " ] @@ -2881,14 +2883,14 @@ "start = time.time()\n", "\n", "#Remove any files that might be from the prediction of QC examples.\n", - "if os.path.exists('/content/predicted_bounding_boxes.csv'):\n", - " os.remove('/content/predicted_bounding_boxes.csv')\n", - "if os.path.exists('/content/predicted_bounding_boxes_new.csv'):\n", - " os.remove('/content/predicted_bounding_boxes_new.csv')\n", - "if os.path.exists('/content/predicted_bounding_boxes_names.csv'):\n", - " os.remove('/content/predicted_bounding_boxes_names.csv')\n", - "if os.path.exists('/content/predicted_bounding_boxes_names_new.csv'):\n", - " os.remove('/content/predicted_bounding_boxes_names_new.csv')\n", + "if os.path.exists(base_path + '/predicted_bounding_boxes.csv'):\n", + " os.remove(base_path + '/predicted_bounding_boxes.csv')\n", + "if os.path.exists(base_path + '/predicted_bounding_boxes_new.csv'):\n", + " os.remove(base_path + '/predicted_bounding_boxes_new.csv')\n", + "if os.path.exists(base_path + '/predicted_bounding_boxes_names.csv'):\n", + " os.remove(base_path + '/predicted_bounding_boxes_names.csv')\n", + "if os.path.exists(base_path + '/predicted_bounding_boxes_names_new.csv'):\n", + " os.remove(base_path + '/predicted_bounding_boxes_names_new.csv')\n", "\n", "\n", "if os.path.exists(Data_folder+'/.ipynb_checkpoints'):\n", @@ -2911,14 +2913,14 @@ " if img.endswith('detected'+file_suffix):\n", " shutil.move(Data_folder+'/'+img,Result_folder+'/'+img)\n", "\n", - "if os.path.exists('/content/predicted_bounding_boxes.csv'):\n", + "if os.path.exists(base_path + '/predicted_bounding_boxes.csv'):\n", " print('Bounding box labels and coordinates saved to '+ Result_folder)\n", "else:\n", " print('For some reason the bounding box labels and coordinates were not saved. 
Check that your predictions look as expected.')\n", "\n", "#Make a csv file to read into imagej macro, to create custom bounding boxes\n", "header = ['filename']+['xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class']*100#max(n_objects)\n", - "with open('/content/predicted_bounding_boxes.csv', newline='') as inFile, open('/content/predicted_bounding_boxes_new.csv', 'w', newline='') as outfile:\n", + "with open(base_path + '/predicted_bounding_boxes.csv', newline='') as inFile, open(base_path + '/predicted_bounding_boxes_new.csv', 'w', newline='') as outfile:\n", " r = csv.reader(inFile)\n", " w = csv.writer(outfile)\n", " next(r, None) # skip the first row from the reader, the old header\n", @@ -2928,7 +2930,7 @@ " for row in r:\n", " w.writerow(row)\n", "\n", - "df_bbox=pd.read_csv('/content/predicted_bounding_boxes_new.csv')\n", + "df_bbox=pd.read_csv(base_path + '/predicted_bounding_boxes_new.csv')\n", "df_bbox=df_bbox.transpose()\n", "new_header = df_bbox.iloc[0] #grab the first row for the header\n", "df_bbox = df_bbox[1:] #take the data less the header row\n", @@ -2987,10 +2989,10 @@ "plt.imshow(y, interpolation='nearest')\n", "plt.title('Predicted output');\n", "\n", - "add_header('/content/predicted_bounding_boxes_names.csv','/content/predicted_bounding_boxes_names_new.csv')\n", + "add_header(base_path + '/predicted_bounding_boxes_names.csv',base_path + '/predicted_bounding_boxes_names_new.csv')\n", "\n", "#We need to edit this predicted_bounding_boxes_new.csv file slightly to display the bounding boxes\n", - "df_bbox2 = pd.read_csv('/content/predicted_bounding_boxes_names_new.csv')\n", + "df_bbox2 = pd.read_csv(base_path + '/predicted_bounding_boxes_names_new.csv')\n", "for img in range(0,df_bbox2.shape[0]):\n", " df_bbox2.iloc[img]\n", " row = pd.DataFrame(df_bbox2.iloc[img])\n", @@ -3011,7 +3013,7 @@ " y2=row[img][i+3])\n", " \n", " \n", - " plt.savefig('/content/detected_cells.png',bbox_inches='tight',transparent=True,pad_inches=0)\n", + " plt.savefig(base_path + '/detected_cells.png',bbox_inches='tight',transparent=True,pad_inches=0)\n", "plt.show() ## show the plot\n" ] }, diff --git a/Colab_notebooks/SplineDist_2D_ZeroCostDL4Mic.ipynb b/Colab_notebooks/SplineDist_2D_ZeroCostDL4Mic.ipynb index 8d9404b5..e46226e0 100644 --- a/Colab_notebooks/SplineDist_2D_ZeroCostDL4Mic.ipynb +++ b/Colab_notebooks/SplineDist_2D_ZeroCostDL4Mic.ipynb @@ -221,7 +221,7 @@ "from builtins import any as b_any\n", "\n", "def get_requirements_path():\n", - " # Store requirements file in 'contents' directory\n", + " # Store requirements file in 'base_path' directory\n", " current_dir = os.getcwd()\n", " dir_count = current_dir.count('/') - 1\n", " path = '../' * (dir_count) + 'requirements.txt'\n", @@ -285,7 +285,10 @@ "\n", "import os\n", "\n", - "os.chdir(\"/content/splinedist\")\n", + "#Create a variable to get and store relative base path\n", + "base_path = os.getcwd()\n", + "\n", + "os.chdir(base_path + \"/splinedist\")\n", "\n", "!python setup.py install\n", "!python splinegenerator.py install\n", @@ -296,7 +299,7 @@ "from splinedist.matching import matching, matching_dataset\n", "from splinedist.models import Config2D, SplineDist2D, SplineDistData2D\n", "\n", - "os.chdir(\"/content\")\n", + "os.chdir(base_path)\n", "# ------- Variable specific to SplineDist -------\n", "from csbdeep.utils import Path, normalize\n", "from zipfile import ZIP_DEFLATED\n", @@ -573,8 +576,8 @@ " pdf.ln(1)\n", " pdf.cell(60, 5, txt = 'Example Training pair', ln=1)\n", " pdf.ln(1)\n", - " exp_size = 
io.imread('/content/TrainingDataExample_SplineDist2D.png').shape\n", - " pdf.image('/content/TrainingDataExample_SplineDist2D.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))\n", + " exp_size = io.imread(base_path + '/TrainingDataExample_SplineDist2D.png').shape\n", + " pdf.image(base_path + '/TrainingDataExample_SplineDist2D.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))\n", " pdf.ln(1)\n", " ref_1 = 'References:\\n - ZeroCostDL4Mic: von Chamier, Lucas & Laine, Romain, et al. \"Democratising deep learning for microscopy with ZeroCostDL4Mic.\" Nature Communications (2021).'\n", " pdf.multi_cell(190, 5, txt = ref_1, align='L')\n", @@ -880,7 +883,7 @@ "\n", "# mount user's Google Drive to Google Colab.\n", "from google.colab import drive\n", - "drive.mount('/content/gdrive')" + "drive.mount(base_path + '/gdrive')" ] }, { @@ -1083,7 +1086,7 @@ "plt.imshow(y, interpolation='nearest', cmap=lbl_cmap)\n", "plt.title('Training target')\n", "plt.axis('off');\n", - "plt.savefig('/content/TrainingDataExample_SplineDist2D.png',bbox_inches='tight',pad_inches=0)" + "plt.savefig(base_path + '/TrainingDataExample_SplineDist2D.png',bbox_inches='tight',pad_inches=0)" ] }, { diff --git a/Colab_notebooks/WGAN_ZeroCostDL4Mic.ipynb b/Colab_notebooks/WGAN_ZeroCostDL4Mic.ipynb index 83ff05d4..3325bfa8 100644 --- a/Colab_notebooks/WGAN_ZeroCostDL4Mic.ipynb +++ b/Colab_notebooks/WGAN_ZeroCostDL4Mic.ipynb @@ -6,7 +6,7 @@ "id": "F4bwTMNakNqq" }, "source": [ - "#WGAN (2D)\n", + "# WGAN (2D)\n", "---\n", "\n", "**Wasserstein Generative Adversarial Network** (WGAN) is an alternative to traditional GAN training, it was published by [Arjovsky, Martin and Chintala, Soumith and Bottou, Leon](http://proceedings.mlr.press/v70/arjovsky17a/arjovsky17a.pdf). This network aims to recover a high-resolution (HR) image from a low-resolution (LR) image and in order to achieve it, a new loss function is proposed: Wasserstein distance or Earth Mover's Distance. They claim a better stability of learning wich is one of the main problems in GAN training.\n", @@ -43,7 +43,7 @@ "\n", "\n", "---\n", - "###**Structure of a notebook**\n", + "### **Structure of a notebook**\n", "\n", "The notebook contains two types of cell: \n", "\n", @@ -52,7 +52,7 @@ "**Code cells** contain code and the code can be modfied by selecting the cell. To execute the cell, move your cursor on the `[ ]`-mark on the left side of the cell (play button appears). Click to execute the cell. After execution is done the animation of play button stops. You can create a new coding cell by clicking `+ Code`.\n", "\n", "---\n", - "###**Table of contents, Code snippets** and **Files**\n", + "### **Table of contents, Code snippets** and **Files**\n", "\n", "On the top left side of the notebook you find three tabs which contain from top to bottom:\n", "\n", @@ -81,7 +81,7 @@ "id": "f7xAqzMpi8ht" }, "source": [ - "#**0. Before getting started**\n", + "# **0. Before getting started**\n", "---\n", "\n", "**We strongly recommend that you generate extra paired images. These images can be used to assess the quality of your trained model (Quality control dataset)**. 
The quality control assessment can be done directly in this notebook.\n", @@ -151,6 +151,9 @@ "import csv\n", "import os\n", "\n", + "#Create a variable to get and store relative base path\n", + "base_path = os.getcwd()\n", + "\n", "from collections import defaultdict, OrderedDict\n", "from tqdm import tqdm\n", "from datetime import datetime\n", @@ -188,7 +191,7 @@ "\n", "###\n", "def get_requirements_path():\n", - " # Store requirements file in 'contents' directory \n", + " # Store requirements file in 'base_path' directory \n", " current_dir = os.getcwd()\n", " dir_count = current_dir.count('/') - 1\n", " path = '../' * (dir_count) + 'requirements.txt'\n", @@ -1453,7 +1456,7 @@ "#mounts user's Google Drive to Google Colab.\n", "\n", "from google.colab import drive\n", - "drive.mount('/content/gdrive')" + "drive.mount(base_path + '/gdrive')" ] }, { @@ -2053,7 +2056,7 @@ "\n", "# Insert code to perform predictions on all datasets in the Source_QC folder\n", "if Only_high_resolution_data:\n", - " Source_QC_folder = \"/content/LR_images\"\n", + " Source_QC_folder = base_path + \"/LR_images\"\n", " Target_QC_folder = QC_high_resolution_folder\n", "\n", "# If only HR data, generate a folder to save generated LR data\n",