Scanning 3DXRD Strain refinement #339

Merged Oct 18, 2024 (33 commits). Changes from all commits are shown below.

Commits:
c8cea8f  Correct g-vector origin in point-by-point indexing (jadball, Oct 2, 2024)
e06f3e7  Progress on PBP refinement class, better PBPMap class (now just a col…) (jadball, Oct 4, 2024)
ee7e313  Checkpoint Axel refinement code (jadball, Oct 7, 2024)
317db2e  Complete Numba refine, verified equal to Axel (jadball, Oct 8, 2024)
3b6bc0b  Refinement checkpoint (jadball, Oct 9, 2024)
3428e5a  Faster peak tagging for merging (jadball, Oct 9, 2024)
ac39c2f  PBP Refinement mostly complete (jadball, Oct 11, 2024)
186d6cd  Run PBP refinement on cluster (jadball, Oct 11, 2024)
41d6f32  Support strain in PBPMap (jadball, Oct 11, 2024)
0aa8842  Make PBPMap from TensorMap for refinement (jadball, Oct 11, 2024)
52fb0bb  Simplify cluster PBP refinement (jadball, Oct 11, 2024)
3351927  Merge branch 'FABLE-3DXRD:master' into master (jadball, Oct 11, 2024)
54daf78  Fix buggy unique peak counter (jadball, Oct 12, 2024)
cd04629  Fix typo in merged peak assignment tolerance (jadball, Oct 12, 2024)
d9ba486  Only refine at masked points (jadball, Oct 12, 2024)
cacc4ce  Attempt at stress conversion (jadball, Oct 17, 2024)
dfc79cc  Refinement notebooks (jadball, Oct 17, 2024)
7c4b6c3  Add new multiphase par to test dataset on Zenodo (jadball, Oct 17, 2024)
4b4fcf2  Bump test data version (jadball, Oct 17, 2024)
7ee3b55  Correct wrong default y0 (jadball, Oct 17, 2024)
096fa60  Make tests pass (jadball, Oct 17, 2024)
a54c5a3  Update fetch data test (jadball, Oct 17, 2024)
adaa497  py2 compat (jadball, Oct 17, 2024)
87088f0  py2 compat (jadball, Oct 17, 2024)
42dbe71  py2 compat (jadball, Oct 17, 2024)
55c1548  Count unique peaks (h,k,l,etasign) in refinement (jadball, Oct 17, 2024)
0896033  Merge branch 'FABLE-3DXRD:master' into master (jadball, Oct 17, 2024)
257a894  Phase mask for stress calculations (jadball, Oct 18, 2024)
d11bd58  Initialise stress with nan (jadball, Oct 18, 2024)
1b58c03  Add some tests for Numba functions in PBP refinement (jadball, Oct 18, 2024)
2c9fe27  Merge branch 'master' of github.com:jadball/ImageD11 (jadball, Oct 18, 2024)
523f565  Only import matplotlib when needed (jadball, Oct 18, 2024)
95a6f42  Skip PBP testing on Py2 (jadball, Oct 18, 2024)
ImageD11/fetch_data.py (20 additions & 18 deletions):

```diff
@@ -17,14 +17,16 @@
 
 # what Zenodo folder can these be found in?
 dataset_base_urls = {
-    'Si_cube_S3DXRD_nt_moves_dty': "https://sandbox.zenodo.org/records/90518/files/",
+    'Si_cube_S3DXRD_nt_moves_dty': "https://sandbox.zenodo.org/records/118843/files/",
 }
 
 # What are the names of the files in the Zenodo folder?
 dataset_filenames = {
     'Si_cube_S3DXRD_nt_moves_dty': {
         'sparsefile': 'Si_cube_S3DXRD_nt_moves_dty_sparse.h5',
-        'parfile': 'Si_refined.par',
+        'parfile': 'pars.json',
+        'geomfile': 'geometry.par',
+        'phasefile0': 'Si_refined.par',
         'e2dxfile': 'e2dx_E-08-0144_20240205.edf',
         'e2dyfile': 'e2dy_E-08-0144_20240205.edf'
     }
@@ -123,25 +125,25 @@ def _get_dataset(test_dataset_name, dest_folder, allow_download):
     for filetype, filename in dataset_filenames[test_dataset_name].items():
         file_url = dataset_base_urls[test_dataset_name] + filename
         # is it a file that could be used as an attribute?
-        if filetype in id11dset.DataSet.ATTRNAMES:
-            if hasattr(ds, filetype):
-                if getattr(ds, filetype) is None:
-                    # this is an attribute, but we don't have a path for it
-                    # put it in processed data root
-                    filepath = os.path.join(processed_data_root_dir, filename)
-                    download_url(file_url, filepath)
-                    setattr(ds, filetype, filepath)
-                else:
-                    # the dataset has a path for this filetype already
-                    filepath = getattr(ds, filetype)
-                    download_url(file_url, filepath)
+        # if filetype in id11dset.DataSet.ATTRNAMES:
+        if hasattr(ds, filetype):
+            if getattr(ds, filetype) is None:
+                # this is an attribute, but we don't have a path for it
+                # put it in processed data root
+                filepath = os.path.join(processed_data_root_dir, filename)
+                download_url(file_url, filepath)
+                setattr(ds, filetype, filepath)
+            else:
+                # the dataset has a path for this filetype already
+                filepath = getattr(ds, filetype)
+                download_url(file_url, filepath)
         else:
             # probably a spatial or a parfile
             # chuck it in processed_data_root_dir
             # set the attribute
             filepath = os.path.join(processed_data_root_dir, filename)
             download_url(file_url, filepath)
             setattr(ds, filetype, filepath)
 
     ds.import_from_sparse(ds.sparsefile)
     ds.save()
```
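For context, the dropped branch distinguished attribute files via `id11dset.DataSet.ATTRNAMES`; after the change, `hasattr` alone decides where a downloaded file lands. A minimal standalone sketch of that dispatch, with the downloader passed in as a callable; the helper name `place_file` is hypothetical, not part of ImageD11:

```python
import os

def place_file(ds, filetype, filename, file_url,
               processed_data_root_dir, download_url):
    """Decide where a downloaded file belongs on a DataSet-like object.

    Mirrors the control flow of the hunk above; download_url(url, path)
    stands in for the module's downloader.
    """
    if hasattr(ds, filetype):
        path = getattr(ds, filetype)
        if path is None:
            # attribute exists but has no path yet: use the processed root
            path = os.path.join(processed_data_root_dir, filename)
            setattr(ds, filetype, path)
        # else: the dataset already knows where this filetype lives
    else:
        # probably a spatial or a parfile: store in the processed root
        path = os.path.join(processed_data_root_dir, filename)
        setattr(ds, filetype, path)
    download_url(file_url, path)
    return path
```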
Changes to a Jupyter notebook (filename not captured in this view):

```diff
@@ -74,6 +74,9 @@
     "\n",
     "if len(dataroot)==0:\n",
     "    print(\"Please fix in the dataroot and analysisroot folder names above!!\")\n",
+    "\n",
+    "analysisroot = os.path.join(analysisroot, 'James', '20241009')\n",
+    "    \n",
     "print('dataroot =',repr(dataroot))\n",
     "print('analysisroot =',repr(analysisroot))"
    ]
@@ -101,7 +104,7 @@
    "outputs": [],
    "source": [
     "# USER: Decide which sample\n",
-    "sample = 'WAu'"
+    "sample = 'FeAu_0p5_tR_nscope'"
    ]
   },
   {
@@ -127,7 +130,7 @@
    "outputs": [],
    "source": [
     "# USER: Decide which dataset\n",
-    "dataset = \"siliconAttrz25\""
+    "dataset = \"top_100um\""
    ]
   },
   {
@@ -243,6 +246,20 @@
     "ImageD11.sinograms.properties.main(ds.dsfile, options={'algorithm': 'lmlabel', 'wtmax': 70000, 'save_overlaps': False})"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "id": "3db1c3e1-e812-4098-a3ab-a7a10b7cab4c",
+   "metadata": {},
+   "source": [
+    "# Finished segmenting!\n",
+    "\n",
+    "You can now choose between two different indexing routes: tomographic (tomo) and point-by-point (pbp). \n",
+    "Tomo gives you better grain shapes, but can't handle highly deformed samples. \n",
+    "Point-by-point can only give you convex grain shapes (less accurate) but can handle high levels of deformation. \n",
+    "Both techniques will join back together during the strain refinement stage (notebook 3). \n",
+    "Therefore notebooks 4 and onwards should work from either the tomo or pbp route."
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -269,9 +286,9 @@
     " \"FeAu_0p5_tR_nscope\": [\"top_-50um\", \"top_-100um\"]\n",
     "}\n",
     "\n",
-    "dset_prefix = \"m\" # some common string in the names of the datasets (*?)\n",
+    "dset_prefix = \"top_\" # some common string in the names of the datasets (*?)\n",
     "\n",
-    "sample_list = [\"Klegs\"]\n",
+    "sample_list = [\"FeAu_0p5_tR_nscope\"]\n",
     "    \n",
     "samples_dict = utils.find_datasets_to_process(dataroot, skips_dict, dset_prefix, sample_list)\n",
     "\n",
```