Avoid /content in new Colab_notebooks
IvanHCenalmor committed Nov 8, 2023
1 parent 97854c8 commit 602bb6b
Showing 11 changed files with 133 additions and 116 deletions.
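In outline, the commit swaps hard-coded `/content` paths for a `base_path` variable resolved at runtime, so the notebooks no longer assume Google Colab's default working directory. A minimal sketch of the pattern, with folder and file names taken from the diffs below purely as illustration:

```python
import os

# Resolve the notebook's working directory once instead of hard-coding '/content'
# (in Colab the two are normally the same, but this also works elsewhere).
base_path = os.getcwd()

# Build every scratch path from base_path; the names here are examples only.
training_source_temp = os.path.join(base_path, "training_source")
config_path = os.path.join(base_path, "config.json")

os.makedirs(training_source_temp, exist_ok=True)
print(config_path)  # e.g. '/content/config.json' when run inside Colab
```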
37 changes: 20 additions & 17 deletions Colab_notebooks/3D_RCAN_ZeroCostDL4Mic.ipynb
@@ -163,7 +163,7 @@
"from builtins import any as b_any\n",
"\n",
"def get_requirements_path():\n",
" # Store requirements file in 'contents' directory\n",
" # Store requirements file in 'base_path' directory\n",
" current_dir = os.getcwd()\n",
" dir_count = current_dir.count('/') - 1\n",
" path = '../' * (dir_count) + 'requirements.txt'\n",
@@ -215,6 +215,9 @@
"import os\n",
"import pandas as pd\n",
"\n",
"#Create a variable to get and store relative base path\n",
"base_path = os.getcwd()\n",
"\n",
"!pip uninstall -y keras-nightly\n",
"\n",
"\n",
@@ -520,8 +523,8 @@
" pdf.ln(1)\n",
" pdf.cell(60, 5, txt = 'Example Training pair', ln=1)\n",
" pdf.ln(1)\n",
" exp_size = io.imread('/content/TrainingDataExample_3D_RCAN.png').shape\n",
" pdf.image('/content/TrainingDataExample_3D_RCAN.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))\n",
" exp_size = io.imread(base_path + '/TrainingDataExample_3D_RCAN.png').shape\n",
" pdf.image(base_path + '/TrainingDataExample_3D_RCAN.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))\n",
" pdf.ln(1)\n",
" ref_1 = 'References:\\n - ZeroCostDL4Mic: von Chamier, Lucas & Laine, Romain, et al. \"Democratising deep learning for microscopy with ZeroCostDL4Mic.\" Nature Communications (2021).'\n",
" pdf.multi_cell(190, 5, txt = ref_1, align='L')\n",
@@ -538,7 +541,7 @@
" if trained:\n",
" pdf.output(model_path+'/'+model_name+'/'+model_name+\"_training_report.pdf\")\n",
" else:\n",
" pdf.output('/content/'+model_name+\"_training_report.pdf\")\n",
" pdf.output(base_path + '/'+model_name+\"_training_report.pdf\")\n",
"\n",
"\n",
"def qc_pdf_export():\n",
@@ -758,7 +761,7 @@
"\n",
"# mount user's Google Drive to Google Colab.\n",
"from google.colab import drive\n",
"drive.mount('/content/gdrive')"
"drive.mount(base_path + '/gdrive')"
]
},
{
@@ -846,7 +849,7 @@
"#@markdown ###Path to training images:\n",
"\n",
"# base folder of GT and low images\n",
"base = \"/content\"\n",
"base = base_path + \"\"\n",
"\n",
"# low SNR images\n",
"Training_source = \"\" #@param {type:\"string\"}\n",
@@ -944,26 +947,26 @@
"File_for_validation = int((number_files)/percentage_validation)+1\n",
"\n",
"#Here we split the training dataset between training and validation\n",
"# Everything is copied in the /Content Folder\n",
"# Everything is copied in the \"base_path\" Folder\n",
"\n",
"Training_source_temp = \"/content/training_source\"\n",
"Training_source_temp = base_path + \"/training_source\"\n",
"\n",
"if os.path.exists(Training_source_temp):\n",
" shutil.rmtree(Training_source_temp)\n",
"os.makedirs(Training_source_temp)\n",
"\n",
"Training_target_temp = \"/content/training_target\"\n",
"Training_target_temp = base_path + \"/training_target\"\n",
"if os.path.exists(Training_target_temp):\n",
" shutil.rmtree(Training_target_temp)\n",
"os.makedirs(Training_target_temp)\n",
"\n",
"Validation_source_temp = \"/content/validation_source\"\n",
"Validation_source_temp = base_path + \"/validation_source\"\n",
"\n",
"if os.path.exists(Validation_source_temp):\n",
" shutil.rmtree(Validation_source_temp)\n",
"os.makedirs(Validation_source_temp)\n",
"\n",
"Validation_target_temp = \"/content/validation_target\"\n",
"Validation_target_temp = base_path + \"/validation_target\"\n",
"if os.path.exists(Validation_target_temp):\n",
" shutil.rmtree(Validation_target_temp)\n",
"os.makedirs(Validation_target_temp)\n",
@@ -1007,7 +1010,7 @@
"plt.imshow(y[mid_plane], norm=simple_norm(y[mid_plane], percent = 99), interpolation='nearest')\n",
"plt.axis('off')\n",
"plt.title('High SNR image (single Z plane)');\n",
"plt.savefig('/content/TrainingDataExample_3D_RCAN.png',bbox_inches='tight',pad_inches=0)"
"plt.savefig(base_path + '/TrainingDataExample_3D_RCAN.png',bbox_inches='tight',pad_inches=0)"
]
},
{
@@ -1063,7 +1066,7 @@
"\n",
"\n",
"if not Save_augmented_images:\n",
" Saving_path= \"/content\"\n",
" Saving_path= base_path + \"\"\n",
"\n",
"\n",
"def rotation_aug(Source_path, Target_path, flip=False):\n",
@@ -1235,7 +1238,7 @@
" \n",
"json_object = json.dumps(dictionary, indent = 4) \n",
" \n",
"with open(\"/content/config.json\", \"w\") as outfile: \n",
"with open(base_path + \"/config.json\", \"w\") as outfile: \n",
" outfile.write(json_object)\n",
"\n",
"# Export pdf summary of training parameters\n",
@@ -1275,7 +1278,7 @@
"start = time.time()\n",
"\n",
"# Start Training\n",
"!python /content/3D-RCAN/train.py -c /content/config.json -o \"$full_model_path\"\n",
"!python \"$base_path\"/3D-RCAN/train.py -c \"$base_path\"/config.json -o \"$full_model_path\"\n",
"\n",
"print(\"Training, done.\")\n",
"\n",
@@ -1449,7 +1452,7 @@
"\n",
"print(\"Restoring images...\")\n",
"\n",
"!python /content/3D-RCAN/apply.py -m \"$full_QC_model_path\" -i \"$Source_QC_folder\" -o \"$path_QC_prediction\"\n",
"!python \"$base_path\"/3D-RCAN/apply.py -m \"$full_QC_model_path\" -i \"$Source_QC_folder\" -o \"$path_QC_prediction\"\n",
"\n",
"print(\"Done...\")\n",
"\n",
@@ -1836,7 +1839,7 @@
"\n",
"print(\"Restoring images...\")\n",
"\n",
"!python /content/3D-RCAN/apply.py -m \"$full_Prediction_model_path\" -i \"$Data_folder\" -o \"$Result_folder\"\n",
"!python \"$base_path\"/3D-RCAN/apply.py -m \"$full_Prediction_model_path\" -i \"$Data_folder\" -o \"$Result_folder\"\n",
"\n",
"print(\"Images saved into the result folder:\", Result_folder)\n",
"\n",
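The training, quality-control and prediction cells above invoke the cloned 3D-RCAN scripts through Colab's `!` shell magic, which substitutes Python variables referenced as `$name` (or `{expression}`) into the command line before it runs; that is why quoting `"$base_path"` works in the `!python` calls. A small sketch of the mechanism, using harmless commands rather than the actual training scripts:

```python
import os

base_path = os.getcwd()
script = "3D-RCAN/train.py"  # illustrative relative path, as cloned by the notebook

# IPython/Colab expands $base_path and {…} before handing the line to the shell;
# the quotes keep paths containing spaces intact.
!echo "$base_path"/$script
!echo "{os.path.join(base_path, script)}"
```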
13 changes: 8 additions & 5 deletions Colab_notebooks/DFCAN_ZeroCostDL4Mic.ipynb
@@ -227,6 +227,9 @@
"import warnings\n",
"warnings.filterwarnings('ignore')\n",
"\n",
"#Create a variable to get and store relative base path\n",
"base_path = os.getcwd()\n",
"\n",
"# -------------- Other definitions -----------\n",
"W = '\\033[0m' # white (normal)\n",
"R = '\\033[31m' # red\n",
@@ -1025,8 +1028,8 @@
" pdf.ln(1)\n",
" pdf.cell(60, 5, txt = 'Example Training pair', ln=1)\n",
" pdf.ln(1)\n",
" exp_size = io.imread(\"/content/ExampleData.png\").shape\n",
" pdf.image(\"/content/ExampleData.png\", x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))\n",
" exp_size = io.imread(base_path + \"/ExampleData.png\").shape\n",
" pdf.image(base_path + \"/ExampleData.png\", x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))\n",
" pdf.ln(1)\n",
" ref_1 = 'References:\\n - ZeroCostDL4Mic: von Chamier, Lucas & Laine, Romain, et al. \"ZeroCostDL4Mic: an open platform to simplify access and use of Deep-Learning in Microscopy.\" BioRxiv (2020).'\n",
" pdf.multi_cell(190, 5, txt = ref_1, align='L')\n",
@@ -1322,7 +1325,7 @@
"#mounts user's Google Drive to Google Colab.\n",
"\n",
"from google.colab import drive\n",
"drive.mount('/content/gdrive')\n",
"drive.mount(base_path + '/gdrive')\n",
"\n",
"\n"
]
@@ -1611,7 +1614,7 @@
"\n",
" if pretrained_model_choice == \"Model_name\":\n",
" pretrained_model_name = \"Model_name\"\n",
" pretrained_model_path = \"/content/\"+pretrained_model_name\n",
" pretrained_model_path = base_path + \"/\"+pretrained_model_name\n",
" print(\"Downloading the 2D_Demo_Model_from_Stardist_2D_paper\")\n",
" if os.path.exists(pretrained_model_path):\n",
" shutil.rmtree(pretrained_model_path)\n",
@@ -1730,7 +1733,7 @@
"plt.subplot(1, 2, 2)\n",
"plt.imshow( train_patches_gt[0], 'gray' )\n",
"plt.title( 'Training patch at full resolution' )\n",
"plt.savefig('/content/ExampleData.png', bbox_inches='tight', pad_inches=0)\n",
"plt.savefig(base_path + '/ExampleData.png', bbox_inches='tight', pad_inches=0)\n",
"\n",
"# Prepare the training data and create data generators\n",
"# training input\n",
2 changes: 1 addition & 1 deletion Colab_notebooks/DRMIME_2D_ZeroCostDL4Mic.ipynb

Large diffs are not rendered by default.

26 changes: 15 additions & 11 deletions Colab_notebooks/DecoNoising_2D_ZeroCostDL4Mic.ipynb
@@ -157,13 +157,17 @@
"Notebook_version = '1.13.1'\n",
"Network = 'DecoNoising'\n",
"\n",
"import os \n",
"\n",
"#Create a variable to get and store relative base path\n",
"base_path = os.getcwd()\n",
"\n",
"#@markdown ##Install DecoNoising and dependencies\n",
"\n",
"from builtins import any as b_any\n",
"\n",
"def get_requirements_path():\n",
" # Store requirements file in 'contents' directory\n",
" # Store requirements file in 'base_path' directory\n",
" current_dir = os.getcwd()\n",
" dir_count = current_dir.count('/') - 1\n",
" path = '../' * (dir_count) + 'requirements.txt'\n",
@@ -208,11 +212,11 @@
"\n",
"!git clone https://github.com/juglab/PN2V\n",
"\n",
"sys.path.append('/content/PN2V')\n",
"sys.path.append(base_path + '/PN2V')\n",
"\n",
"from pn2v import training\n",
"\n",
"sys.path.append('/content/DecoNoising')\n",
"sys.path.append(base_path + '/DecoNoising')\n",
"\n",
"import matplotlib.pyplot as plt\n",
"from unet.model import UNet\n",
@@ -447,8 +451,8 @@
" pdf.ln(1)\n",
" pdf.cell(60, 5, txt = 'Example Training Image', ln=1)\n",
" pdf.ln(1)\n",
" exp_size = io.imread('/content/TrainingDataExample.png').shape\n",
" pdf.image('/content/TrainingDataExample.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))\n",
" exp_size = io.imread(base_path + '/TrainingDataExample.png').shape\n",
" pdf.image(base_path + '/TrainingDataExample.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))\n",
" pdf.ln(1)\n",
" ref_1 = 'References:\\n - ZeroCostDL4Mic: von Chamier, Lucas & Laine, Romain, et al. \"Democratising deep learning for microscopy with ZeroCostDL4Mic.\" Nature Communications (2021).'\n",
" pdf.multi_cell(190, 5, txt = ref_1, align='L')\n",
@@ -719,7 +723,7 @@
"\n",
"# mount user's Google Drive to Google Colab.\n",
"from google.colab import drive\n",
"drive.mount('/content/gdrive')"
"drive.mount(base_path + '/gdrive')"
]
},
{
@@ -880,15 +884,15 @@
" Noisy_for_validation = 1\n",
"\n",
"#Here we split the training dataset between training and validation\n",
"# Everything is copied in the /Content Folder\n",
"Training_source_temp = \"/content/training_source\"\n",
"# Everything is copied in the 'base_path' Folder\n",
"Training_source_temp = base_path + \"/training_source\"\n",
"\n",
"if os.path.exists(Training_source_temp):\n",
" shutil.rmtree(Training_source_temp)\n",
"os.makedirs(Training_source_temp)\n",
"\n",
"\n",
"Validation_source_temp = \"/content/validation_source\"\n",
"Validation_source_temp = base_path + \"/validation_source\"\n",
"\n",
"if os.path.exists(Validation_source_temp):\n",
" shutil.rmtree(Validation_source_temp)\n",
@@ -921,7 +925,7 @@
"plt.imshow(x, interpolation='nearest', norm=norm, cmap='magma')\n",
"plt.title('Training source')\n",
"plt.axis('off');\n",
"plt.savefig('/content/TrainingDataExample.png',bbox_inches='tight',pad_inches=0)\n"
"plt.savefig(base_path + '/TrainingDataExample.png',bbox_inches='tight',pad_inches=0)\n"
]
},
{
@@ -1110,7 +1114,7 @@
"\n",
" if pretrained_model_choice == \"Model_name\":\n",
" pretrained_model_name = \"Model_name\"\n",
" pretrained_model_path = \"/content/\"+pretrained_model_name\n",
" pretrained_model_path = base_path + \"/\"+pretrained_model_name\n",
" print(\"Downloading the 2D_Demo_Model_from_Stardist_2D_paper\")\n",
" if os.path.exists(pretrained_model_path):\n",
" shutil.rmtree(pretrained_model_path)\n",
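The DecoNoising install cell above relies on cloning helper repositories next to the working directory and extending `sys.path` so their modules can be imported. A condensed sketch of that pattern, assuming the same PN2V repository referenced in the diff:

```python
import os
import sys

base_path = os.getcwd()

# Clone the dependency into base_path (skipped if it is already there).
if not os.path.exists(os.path.join(base_path, "PN2V")):
    !git clone https://github.com/juglab/PN2V "$base_path"/PN2V

# Make the cloned checkout importable, then import from it as the notebook does.
sys.path.append(os.path.join(base_path, "PN2V"))
from pn2v import training  # noqa: E402  (import after the sys.path edit, as in the notebook)
```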
25 changes: 14 additions & 11 deletions Colab_notebooks/DenoiSeg_ZeroCostDL4Mic.ipynb
@@ -232,7 +232,7 @@
"from builtins import any as b_any\n",
"\n",
"def get_requirements_path():\n",
" # Store requirements file in 'contents' directory\n",
" # Store requirements file in 'base_path' directory\n",
" current_dir = os.getcwd()\n",
" dir_count = current_dir.count('/') - 1\n",
" path = '../' * (dir_count) + 'requirements.txt'\n",
@@ -333,6 +333,9 @@
"import subprocess\n",
"from pip._internal.operations.freeze import freeze\n",
"\n",
"#Create a variable to get and store relative base path\n",
"base_path = os.getcwd()\n",
"\n",
"# Colors for the warning messages\n",
"class bcolors:\n",
" WARNING = '\\033[31m'\n",
@@ -509,8 +512,8 @@
" pdf.ln(1)\n",
" pdf.cell(60, 5, txt = 'Example Training pair', ln=1)\n",
" pdf.ln(1)\n",
" exp_size = io.imread('/content/TrainingDataExample_DenoiSeg.png').shape\n",
" pdf.image('/content/TrainingDataExample_DenoiSeg.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))\n",
" exp_size = io.imread(base_path + '/TrainingDataExample_DenoiSeg.png').shape\n",
" pdf.image(base_path + '/TrainingDataExample_DenoiSeg.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))\n",
" pdf.ln(1)\n",
" ref_1 = 'References:\\n - ZeroCostDL4Mic: von Chamier, Lucas & Laine, Romain, et al. \"Democratising deep learning for microscopy with ZeroCostDL4Mic.\" Nature Communications (2021).'\n",
" pdf.multi_cell(190, 5, txt = ref_1, align='L')\n",
@@ -800,7 +803,7 @@
"\n",
"# mount user's Google Drive to Google Colab.\n",
"from google.colab import drive\n",
"drive.mount('/content/gdrive')"
"drive.mount(base_path + '/gdrive')"
]
},
{
@@ -953,25 +956,25 @@
"\n",
"\n",
"#Here we split the training dataset between training and validation\n",
"# Everything is copied in the /Content Folder\n",
"Training_source_temp = \"/content/training_source\"\n",
"# Everything is copied in the 'base_path' Folder\n",
"Training_source_temp = base_path + \"/training_source\"\n",
"\n",
"if os.path.exists(Training_source_temp):\n",
" shutil.rmtree(Training_source_temp)\n",
"os.makedirs(Training_source_temp)\n",
"\n",
"Training_target_temp = \"/content/training_target\"\n",
"Training_target_temp = base_path + \"/training_target\"\n",
"if os.path.exists(Training_target_temp):\n",
" shutil.rmtree(Training_target_temp)\n",
"os.makedirs(Training_target_temp)\n",
"\n",
"Validation_source_temp = \"/content/validation_source\"\n",
"Validation_source_temp = base_path + \"/validation_source\"\n",
"\n",
"if os.path.exists(Validation_source_temp):\n",
" shutil.rmtree(Validation_source_temp)\n",
"os.makedirs(Validation_source_temp)\n",
"\n",
"Validation_target_temp = \"/content/validation_target\"\n",
"Validation_target_temp = base_path + \"/validation_target\"\n",
"if os.path.exists(Validation_target_temp):\n",
" shutil.rmtree(Validation_target_temp)\n",
"os.makedirs(Validation_target_temp)\n",
@@ -1015,7 +1018,7 @@
"plt.imshow(y, interpolation='nearest', vmin=0, vmax=1, cmap='viridis')\n",
"plt.title('Training target')\n",
"plt.axis('off');\n",
"plt.savefig('/content/TrainingDataExample_DenoiSeg.png',bbox_inches='tight',pad_inches=0)\n",
"plt.savefig(base_path + '/TrainingDataExample_DenoiSeg.png',bbox_inches='tight',pad_inches=0)\n",
"\n"
]
},
@@ -1113,7 +1116,7 @@
"\n",
" if pretrained_model_choice == \"Model_name\":\n",
" pretrained_model_name = \"Model_name\"\n",
" pretrained_model_path = \"/content/\"+pretrained_model_name\n",
" pretrained_model_path = base_path + \"/\"+pretrained_model_name\n",
" print(\"Downloading the 2D_Demo_Model_from_Stardist_2D_paper\")\n",
" if os.path.exists(pretrained_model_path):\n",
" shutil.rmtree(pretrained_model_path)\n",
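Several of the notebooks above (3D_RCAN, DenoiSeg) copy the training pairs into temporary `training_*` and `validation_*` folders created under `base_path` before training. A simplified, hypothetical sketch of that split step (the real cells derive the validation count from `percentage_validation`):

```python
import os
import random
import shutil

base_path = os.getcwd()

def split_for_validation(source_dir, train_tmp, val_tmp, fraction=0.1):
    """Copy files from source_dir into fresh training/validation folders under base_path."""
    for d in (train_tmp, val_tmp):
        if os.path.exists(d):
            shutil.rmtree(d)   # start from clean temp folders, as the notebooks do
        os.makedirs(d)
    files = sorted(os.listdir(source_dir))
    n_val = max(1, int(len(files) * fraction))
    val_files = set(random.sample(files, n_val))
    for f in files:
        dst = val_tmp if f in val_files else train_tmp
        shutil.copy(os.path.join(source_dir, f), os.path.join(dst, f))

# Example call (paths hypothetical):
# split_for_validation(Training_source,
#                      os.path.join(base_path, "training_source"),
#                      os.path.join(base_path, "validation_source"))
```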
2 changes: 1 addition & 1 deletion Colab_notebooks/Detectron2_2D_ZeroCostDL4Mic.ipynb

Large diffs are not rendered by default.
