Merge pull request FABLE-3DXRD#391 from jadball/master
Parameterise S3DXRD notebooks
jonwright authored Feb 13, 2025
2 parents dd5989a + 76cc952 commit 944972c
Showing 29 changed files with 68,775 additions and 1,165 deletions.
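The thrust of the PR: each S3DXRD notebook gains a cell tagged `parameters`, so the workflow can be driven non-interactively with papermill instead of hand-editing cells. A minimal sketch of how such a notebook could then be run; the parameter names come from the diffs below, while the output notebook name and the data paths are hypothetical:

```python
# Minimal papermill sketch for the parameterised notebooks.
# Parameter names are taken from the diff below; all paths are hypothetical.
import papermill as pm

pm.execute_notebook(
    "ImageD11/nbGui/S3DXRD/0_segment_and_label.ipynb",
    "/tmp/0_segment_and_label_out.ipynb",  # executed copy, hypothetical path
    parameters=dict(
        dataroot="/data/visitor/ma0000/id11/RAW_DATA",            # hypothetical
        analysisroot="/data/visitor/ma0000/id11/PROCESSED_DATA",  # hypothetical
        sample="FeAu_0p5_tR_nscope",  # example sample used in the notebook
        dataset="top_100um",          # example dataset used in the notebook
        dset_prefix="top_",
    ),
)
```

papermill injects the supplied values in a new cell immediately after the one tagged `parameters`, so the `None` defaults in the tagged cell are overwritten before any later cell runs.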
86 changes: 51 additions & 35 deletions ImageD11/nbGui/S3DXRD/0_segment_and_label.ipynb
@@ -23,30 +23,24 @@
},
"outputs": [],
"source": [
"exec(open('/data/id11/nanoscope/install_ImageD11_from_git.py').read())\n",
"PYTHONPATH = setup_ImageD11_from_git( ) # ( os.path.join( os.environ['HOME'],'Code'), 'ImageD11_git' )"
"exec(open('/data/id11/nanoscope/install_ImageD11_from_git.py').read())"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5726795e-91cf-40cf-b3a9-b114de84e017",
"id": "c3bddb80-39f9-4cd7-9cc1-59fc8d240c24",
"metadata": {
"tags": []
"tags": [
"parameters"
]
},
"outputs": [],
"source": [
"# Import needed packages\n",
"%matplotlib ipympl\n",
"import pprint\n",
"import numpy as np\n",
"import ImageD11.sinograms.dataset\n",
"import ImageD11.sinograms.lima_segmenter\n",
"import ImageD11.sinograms.assemble_label\n",
"import ImageD11.sinograms.properties\n",
"import ImageD11.nbGui.nb_utils as utils\n",
"from ImageD11.nbGui import segmenter_gui\n",
"# this cell is tagged with 'parameters'\n",
"# to view the tag, select the cell, then find the settings gear icon (right or left sidebar) and look for Cell Tags\n",
"\n",
"PYTHONPATH = setup_ImageD11_from_git( ) # ( os.path.join( os.environ['HOME'],'Code'), 'ImageD11_git' )\n",
"\n",
"# Experts : update these files for your detector if you need to\n",
"maskfile = \"/data/id11/nanoscope/Eiger/eiger_mask_E-08-0144_20240205.edf\"\n",
@@ -57,7 +51,36 @@
"dtymotor = 'dty'\n",
"\n",
"# Default segmentation options\n",
"options = { 'cut' : 1, 'pixels_in_spot' : 3, 'howmany' : 100000 }"
"options = { 'cut' : 1, 'pixels_in_spot' : 3, 'howmany' : 100000 }\n",
"\n",
"# EXPERTS: These can be provided as papermill parameters. Users, leave these as None for now...\n",
"dataroot = None\n",
"analysisroot = None\n",
"sample = None\n",
"dataset = None\n",
"\n",
"dset_prefix = \"top_\" # some common string in the names of the datasets if processing multiple scans"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5726795e-91cf-40cf-b3a9-b114de84e017",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# Import needed packages\n",
"%matplotlib ipympl\n",
"import pprint\n",
"import numpy as np\n",
"import ImageD11.sinograms.dataset\n",
"import ImageD11.sinograms.lima_segmenter\n",
"import ImageD11.sinograms.assemble_label\n",
"import ImageD11.sinograms.properties\n",
"import ImageD11.nbGui.nb_utils as utils\n",
"from ImageD11.nbGui import segmenter_gui"
]
},
{
Expand All @@ -70,7 +93,8 @@
"outputs": [],
"source": [
"# Set up the file paths. Edit this if you are not at ESRF or not using the latest data policy.\n",
"dataroot, analysisroot = segmenter_gui.guess_ESRF_paths() \n",
"if dataroot is None:\n",
" dataroot, analysisroot = segmenter_gui.guess_ESRF_paths() \n",
"\n",
"if len(dataroot)==0:\n",
" print(\"Please fix in the dataroot and analysisroot folder names above!!\")\n",
@@ -102,7 +126,8 @@
"outputs": [],
"source": [
"# USER: Decide which sample\n",
"sample = 'FeAu_0p5_tR_nscope'"
"if sample is None:\n",
" sample = 'FeAu_0p5_tR_nscope'"
]
},
{
@@ -128,7 +153,8 @@
"outputs": [],
"source": [
"# USER: Decide which dataset\n",
"dataset = \"top_100um\""
"if dataset is None:\n",
" dataset = \"top_100um\""
]
},
{
@@ -258,19 +284,6 @@
"Therefore notebooks 4 and onwards should work from either the tomo or pbp route."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "703d22d0-ef82-4e08-8087-c57e76e16de1",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"if 1:\n",
" raise ValueError(\"Change the 1 above to 0 to allow 'Run all cells' in the notebook\")"
]
},
{
"cell_type": "code",
"execution_count": null,
@@ -280,13 +293,16 @@
},
"outputs": [],
"source": [
"# you can optionally skip samples\n",
"# skips_dict = {\n",
"# \"FeAu_0p5_tR_nscope\": [\"top_-50um\", \"top_-100um\"]\n",
"# }\n",
"# otherwise by default skip nothing:\n",
"skips_dict = {\n",
" \"FeAu_0p5_tR_nscope\": [\"top_-50um\", \"top_-100um\"]\n",
" ds.sample: []\n",
"}\n",
"\n",
"dset_prefix = \"top_\" # some common string in the names of the datasets (*?)\n",
"\n",
"sample_list = [\"FeAu_0p5_tR_nscope\"]\n",
"sample_list = [ds.sample, ]\n",
" \n",
"samples_dict = utils.find_datasets_to_process(dataroot, skips_dict, dset_prefix, sample_list)\n",
"\n",
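In summary, the segmentation notebook's pattern is: papermill-injectable names default to `None` in the tagged cell, and each later cell falls back to an interactive guess or an example value only when nothing was injected. A condensed sketch of that pattern, with the fallback values taken from the diff above:

```python
# Condensed sketch of the None-guard pattern from 0_segment_and_label.ipynb.
from ImageD11.nbGui import segmenter_gui

# In the cell tagged 'parameters' (papermill overwrites these; interactive
# users leave them as None):
dataroot = None
analysisroot = None
sample = None
dataset = None

# Later cells only guess when no value was injected:
if dataroot is None:
    # interactive fallback: guess from the ESRF data policy layout
    dataroot, analysisroot = segmenter_gui.guess_ESRF_paths()
if sample is None:
    sample = "FeAu_0p5_tR_nscope"  # example sample from the notebook
if dataset is None:
    dataset = "top_100um"          # example dataset from the notebook
```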
125 changes: 88 additions & 37 deletions ImageD11/nbGui/S3DXRD/4_visualise.ipynb
@@ -28,10 +28,43 @@
"\n",
"os.environ['OMP_NUM_THREADS'] = '1'\n",
"os.environ['OPENBLAS_NUM_THREADS'] = '1'\n",
"os.environ['MKL_NUM_THREADS'] = '1'\n",
"os.environ['MKL_NUM_THREADS'] = '1'"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"exec(open('/data/id11/nanoscope/install_ImageD11_from_git.py').read())"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": [
"parameters"
]
},
"outputs": [],
"source": [
"# this cell is tagged with 'parameters'\n",
"# to view the tag, select the cell, then find the settings gear icon (right or left sidebar) and look for Cell Tags\n",
"\n",
"PYTHONPATH = setup_ImageD11_from_git( ) # ( os.path.join( os.environ['HOME'],'Code'), 'ImageD11_git' )\n",
"\n",
"exec(open('/data/id11/nanoscope/install_ImageD11_from_git.py').read())\n",
"PYTHONPATH = setup_ImageD11_from_git( ) # ( os.path.join( os.environ['HOME'],'Code'), 'ImageD11_git' )"
"# dataset file to import\n",
"dset_file = 'si_cube_test/processed/Si_cube/Si_cube_S3DXRD_nt_moves_dty/Si_cube_S3DXRD_nt_moves_dty_dataset.h5'\n",
"\n",
"# which phase to index\n",
"phase_str = 'Si'\n",
"\n",
"# the minimum number of peaks you want a pixel to have to be counted\n",
"min_unique = 400\n",
"\n",
"dset_prefix = \"top_\" # some common string in the names of the datasets if processing multiple scans"
]
},
{
@@ -69,8 +102,6 @@
"source": [
"# USER: Pass path to dataset file\n",
"\n",
"dset_file = 'si_cube_test/processed/Si_cube/Si_cube_S3DXRD_nt_moves_dty/Si_cube_S3DXRD_nt_moves_dty_dataset.h5'\n",
"\n",
"ds = ImageD11.sinograms.dataset.load(dset_file)\n",
" \n",
"sample = ds.sample\n",
@@ -105,7 +136,6 @@
"outputs": [],
"source": [
"# now let's select a phase to index from our parameters json\n",
"phase_str = 'Fe'\n",
"\n",
"ref_ucell = ds.phases.unitcells[phase_str]\n",
"\n",
@@ -122,7 +152,8 @@
"source": [
"# import refinement manager\n",
"\n",
"refine = PBPRefine.from_h5(ds.refmanfile)"
"refmanpath = os.path.splitext(ds.refmanfile)[0] + f'_{phase_str}.h5'\n",
"refine = PBPRefine.from_h5(refmanpath)"
]
},
{
@@ -148,8 +179,6 @@
"source": [
"# choose the minimum number of peaks you want a pixel to have to be counted\n",
"\n",
"min_unique = 400\n",
"\n",
"refine.refinedmap.choose_best(min_unique)\n",
"\n",
"# refine.refinedmap.choose_best(min_unique)"
@@ -187,7 +216,7 @@
"\n",
"for i in range(3):\n",
" for j in range(3):\n",
" axs[i,j].imshow(refine.refinedmap.best_eps[:, :, i, j], origin=\"lower\", cmap=cmap, norm=normalizer)\n",
" axs[i,j].imshow(refine.refinedmap.best_eps[:, :, i, j], origin=\"lower\", cmap=cmap, norm=normalizer, interpolation='nearest')\n",
" axs[i,j].set_title(f'eps_{i+1}{j+1}')\n",
"fig.supxlabel('< Lab Y axis')\n",
"fig.supylabel('Lab X axis')\n",
@@ -274,7 +303,7 @@
"\n",
"for i in range(3):\n",
" for j in range(3):\n",
" axs[i,j].imshow(tmap.eps_sample[0, ..., i, j], origin=\"lower\", cmap=cmap, norm=normalizer)\n",
" axs[i,j].imshow(tmap.eps_sample[0, ..., i, j], origin=\"lower\", cmap=cmap, norm=normalizer, interpolation='nearest')\n",
" axs[i,j].set_title(f'eps_{i+1}{j+1}')\n",
"fig.supxlabel('Lab X axis --->')\n",
"fig.supylabel('Lab Y axis --->')\n",
@@ -330,25 +359,27 @@
"metadata": {},
"outputs": [],
"source": [
"# save the refined TensorMap to disk\n",
"\n",
"tmap.to_h5(os.path.join(ds.analysispath, 'pbp_tensormap_refined.h5'))\n",
"tmap.to_paraview(os.path.join(ds.analysispath, 'pbp_tensormap_refined.h5'))"
"# if we have a previous tomographic TensorMap, we can try to get the labels map too:\n",
"try:\n",
" tmap_tomo = TensorMap.from_h5(ds.grainsfile, h5group='TensorMap_' + phase_str)\n",
" tmap.add_map('labels', tmap_tomo.labels)\n",
"except (FileNotFoundError, OSError, KeyError):\n",
" # couldn't find one, continue anyway\n",
" pass"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": []
},
"metadata": {},
"outputs": [],
"source": [
"# you can also do an MTEX export if you like:\n",
"# save the refined TensorMap to disk\n",
"\n",
"ctf_path = os.path.join(ds.analysispath, 'pbp_tensormap_refined.ctf')\n",
"refined_tmap_path = os.path.join(ds.analysispath, f'{ds.sample}_{ds.dset}_refined_tmap_{phase_str}.h5')\n",
"\n",
"tmap.to_ctf_mtex(ctf_path, z_index=0)"
"tmap.to_h5(refined_tmap_path)\n",
"tmap.to_paraview(refined_tmap_path)"
]
},
{
@@ -359,17 +390,22 @@
},
"outputs": [],
"source": [
"ds.save()"
"# you can also do an MTEX export if you like:\n",
"\n",
"refined_ctf_path = os.path.join(ds.analysispath, f'{ds.sample}_{ds.dset}_refined_tmap_{phase_str}.ctf')\n",
"\n",
"tmap.to_ctf_mtex(refined_ctf_path, z_index=0)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"if 1:\n",
" raise ValueError(\"Change the 1 above to 0 to allow 'Run all cells' in the notebook\")"
"ds.save()"
]
},
{
@@ -382,15 +418,18 @@
"# by default this will do all samples in sample_list, all datasets with a prefix of dset_prefix\n",
"# you can add samples and datasets to skip in skips_dict\n",
"\n",
"# you can optionally skip samples\n",
"# skips_dict = {\n",
"# \"FeAu_0p5_tR_nscope\": [\"top_-50um\", \"top_-100um\"]\n",
"# }\n",
"# otherwise by default skip nothing:\n",
"skips_dict = {\n",
" \"FeAu_0p5_tR_nscope\": [\"top_-50um\", \"top_-100um\"]\n",
" ds.sample: []\n",
"}\n",
"\n",
"dset_prefix = \"top\"\n",
"sample_list = [ds.sample, ]\n",
"\n",
"sample_list = [\"FeAu_0p5_tR_nscope\"]\n",
" \n",
"samples_dict = utils.find_datasets_to_process(ds.dataroot, skips_dict, dset_prefix, sample_list)\n",
"samples_dict = utils.find_datasets_to_process(rawdata_path, skips_dict, dset_prefix, sample_list)\n",
" \n",
"# manual override:\n",
"# samples_dict = {\"FeAu_0p5_tR_nscope\": [\"top_100um\", \"top_150um\"]}\n",
Expand All @@ -410,15 +449,19 @@
" ds = ImageD11.sinograms.dataset.load(dset_path)\n",
" print(f\"I have a DataSet {ds.dset} in sample {ds.sample}\")\n",
" \n",
" if not os.path.exists(ds.refoutfile):\n",
" refoutpath = os.path.splitext(ds.refoutfile)[0] + f'_{phase_str}.h5'\n",
" refmanpath = os.path.splitext(ds.refmanfile)[0] + f'_{phase_str}.h5'\n",
"\n",
" if not os.path.exists(refoutpath):\n",
" print(f\"Couldn't find PBP refinement output file for {dataset} in sample {sample}, skipping\")\n",
" continue\n",
" \n",
" if os.path.exists(os.path.join(ds.analysispath, 'pbp_tensormap_refined.h5')):\n",
" refined_tmap_path = os.path.join(ds.analysispath, f'{ds.sample}_{ds.dset}_refined_tmap_{phase_str}.h5')\n",
" if os.path.exists(refined_tmap_path):\n",
" print(f\"Already have refined TensorMap output file for {dataset} in sample {sample}, skipping\")\n",
" continue\n",
" \n",
" refine = PBPRefine.from_h5(ds.refmanfile)\n",
" refine = PBPRefine.from_h5(refmanpath)\n",
" refine.refinedmap.choose_best(min_unique)\n",
" \n",
" # first let's work out what phase we have\n",
@@ -437,10 +480,18 @@
" tmap.get_ipf_maps()\n",
" eul = tmap.euler\n",
" \n",
" tmap.to_h5(os.path.join(ds.analysispath, 'pbp_tensormap_refined.h5'))\n",
" tmap.to_paraview(os.path.join(ds.analysispath, 'pbp_tensormap_refined.h5'))\n",
" ctf_path = os.path.join(ds.analysispath, 'pbp_tensormap_refined.ctf')\n",
" tmap.to_ctf_mtex(ctf_path, z_index=0)\n",
" # if we have a previous tomographic TensorMap, we can try to get the labels map too:\n",
" try:\n",
" tmap_tomo = TensorMap.from_h5(ds.grainsfile, h5group='TensorMap_' + phase_str)\n",
" tmap.add_map('labels', tmap_tomo.labels)\n",
" except (FileNotFoundError, OSError, KeyError):\n",
" # couldn't find one, continue anyway\n",
" pass\n",
" \n",
" tmap.to_h5(refined_tmap_path)\n",
" tmap.to_paraview(refined_tmap_path)\n",
" refined_ctf_path = os.path.join(ds.analysispath, f'{ds.sample}_{ds.dset}_refined_tmap_{phase_str}.ctf')\n",
" tmap.to_ctf_mtex(refined_ctf_path, z_index=0)\n",
"\n",
" ds.save()\n",
"\n",

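A second recurring change in 4_visualise.ipynb: output files are now keyed by phase, so refining several phases of one dataset no longer overwrites a single pbp_tensormap_refined.h5. The naming pattern, extracted from the diff above (assuming a loaded ImageD11 dataset `ds`, as in the notebook):

```python
# Per-phase output naming pattern from 4_visualise.ipynb.
# Assumes ds is a loaded ImageD11.sinograms.dataset, as in the notebook.
import os

phase_str = "Si"  # papermill parameter: which phase to visualise

refmanpath = os.path.splitext(ds.refmanfile)[0] + f"_{phase_str}.h5"
refined_tmap_path = os.path.join(
    ds.analysispath, f"{ds.sample}_{ds.dset}_refined_tmap_{phase_str}.h5"
)
refined_ctf_path = os.path.join(
    ds.analysispath, f"{ds.sample}_{ds.dset}_refined_tmap_{phase_str}.ctf"
)
```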