From 0311b613daa3c8865beaa230b23d0c627c2cea3c Mon Sep 17 00:00:00 2001
From: Jonathan Wright
Date: Thu, 19 Sep 2024 13:53:39 +0200
Subject: [PATCH 1/3] 3DXRD was not a valid module name

---
 ImageD11/nbGui/{3DXRD => TDXRD}/0_3DXRD_segment_frelon.ipynb    | 0
 ImageD11/nbGui/{3DXRD => TDXRD}/1_3DXRD_refine_parameters.ipynb | 0
 ImageD11/nbGui/{3DXRD => TDXRD}/2_3DXRD_index.ipynb             | 0
 ImageD11/nbGui/{3DXRD => TDXRD}/2_3DXRD_index_friedel.ipynb     | 0
 ImageD11/nbGui/{3DXRD => TDXRD}/2_3DXRD_index_grid.ipynb        | 0
 ImageD11/nbGui/{3DXRD => TDXRD}/3_3DXRD_look_at_peaks.ipynb     | 0
 ImageD11/nbGui/{3DXRD => TDXRD}/4_3DXRD_merge_slices.ipynb      | 0
 ImageD11/nbGui/{3DXRD => TDXRD}/frelon_peaksearch.py            | 0
 8 files changed, 0 insertions(+), 0 deletions(-)
 rename ImageD11/nbGui/{3DXRD => TDXRD}/0_3DXRD_segment_frelon.ipynb (100%)
 rename ImageD11/nbGui/{3DXRD => TDXRD}/1_3DXRD_refine_parameters.ipynb (100%)
 rename ImageD11/nbGui/{3DXRD => TDXRD}/2_3DXRD_index.ipynb (100%)
 rename ImageD11/nbGui/{3DXRD => TDXRD}/2_3DXRD_index_friedel.ipynb (100%)
 rename ImageD11/nbGui/{3DXRD => TDXRD}/2_3DXRD_index_grid.ipynb (100%)
 rename ImageD11/nbGui/{3DXRD => TDXRD}/3_3DXRD_look_at_peaks.ipynb (100%)
 rename ImageD11/nbGui/{3DXRD => TDXRD}/4_3DXRD_merge_slices.ipynb (100%)
 rename ImageD11/nbGui/{3DXRD => TDXRD}/frelon_peaksearch.py (100%)

diff --git a/ImageD11/nbGui/3DXRD/0_3DXRD_segment_frelon.ipynb b/ImageD11/nbGui/TDXRD/0_3DXRD_segment_frelon.ipynb
similarity index 100%
rename from ImageD11/nbGui/3DXRD/0_3DXRD_segment_frelon.ipynb
rename to ImageD11/nbGui/TDXRD/0_3DXRD_segment_frelon.ipynb
diff --git a/ImageD11/nbGui/3DXRD/1_3DXRD_refine_parameters.ipynb b/ImageD11/nbGui/TDXRD/1_3DXRD_refine_parameters.ipynb
similarity index 100%
rename from ImageD11/nbGui/3DXRD/1_3DXRD_refine_parameters.ipynb
rename to ImageD11/nbGui/TDXRD/1_3DXRD_refine_parameters.ipynb
diff --git a/ImageD11/nbGui/3DXRD/2_3DXRD_index.ipynb b/ImageD11/nbGui/TDXRD/2_3DXRD_index.ipynb
similarity index 100%
rename from ImageD11/nbGui/3DXRD/2_3DXRD_index.ipynb
rename to ImageD11/nbGui/TDXRD/2_3DXRD_index.ipynb
diff --git a/ImageD11/nbGui/3DXRD/2_3DXRD_index_friedel.ipynb b/ImageD11/nbGui/TDXRD/2_3DXRD_index_friedel.ipynb
similarity index 100%
rename from ImageD11/nbGui/3DXRD/2_3DXRD_index_friedel.ipynb
rename to ImageD11/nbGui/TDXRD/2_3DXRD_index_friedel.ipynb
diff --git a/ImageD11/nbGui/3DXRD/2_3DXRD_index_grid.ipynb b/ImageD11/nbGui/TDXRD/2_3DXRD_index_grid.ipynb
similarity index 100%
rename from ImageD11/nbGui/3DXRD/2_3DXRD_index_grid.ipynb
rename to ImageD11/nbGui/TDXRD/2_3DXRD_index_grid.ipynb
diff --git a/ImageD11/nbGui/3DXRD/3_3DXRD_look_at_peaks.ipynb b/ImageD11/nbGui/TDXRD/3_3DXRD_look_at_peaks.ipynb
similarity index 100%
rename from ImageD11/nbGui/3DXRD/3_3DXRD_look_at_peaks.ipynb
rename to ImageD11/nbGui/TDXRD/3_3DXRD_look_at_peaks.ipynb
diff --git a/ImageD11/nbGui/3DXRD/4_3DXRD_merge_slices.ipynb b/ImageD11/nbGui/TDXRD/4_3DXRD_merge_slices.ipynb
similarity index 100%
rename from ImageD11/nbGui/3DXRD/4_3DXRD_merge_slices.ipynb
rename to ImageD11/nbGui/TDXRD/4_3DXRD_merge_slices.ipynb
diff --git a/ImageD11/nbGui/3DXRD/frelon_peaksearch.py b/ImageD11/nbGui/TDXRD/frelon_peaksearch.py
similarity index 100%
rename from ImageD11/nbGui/3DXRD/frelon_peaksearch.py
rename to ImageD11/nbGui/TDXRD/frelon_peaksearch.py
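Why the rename: Python module names must be valid identifiers, and an identifier cannot start with a digit, so a package directory called "3DXRD" can never be imported. A minimal illustration (hypothetical interpreter session; the exact SyntaxError message varies with Python version):

    >>> import ImageD11.nbGui.3DXRD.frelon_peaksearch
      File "<stdin>", line 1
        import ImageD11.nbGui.3DXRD.frelon_peaksearch
                              ^
    SyntaxError: invalid syntax

After the rename, "from ImageD11.nbGui.TDXRD import frelon_peaksearch" is legal.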
From 1b0d4dcd72f7f27211369e0a6b00bf005ce8221a Mon Sep 17 00:00:00 2001
From: Jonathan Wright
Date: Thu, 19 Sep 2024 15:56:00 +0200
Subject: [PATCH 2/3] cleanup for frelon peaksearch fix #324

---
 ImageD11/blobcorrector.py                    |  13 +-
 ImageD11/frelon_peaksearch.py                | 418 ++++++++++++++++++
 .../nbGui/TDXRD/0_3DXRD_segment_frelon.ipynb | 146 +++---
 ImageD11/nbGui/TDXRD/frelon_peaksearch.py    | 239 ----------
 ImageD11/nbGui/segmenter_gui.py              |  12 +-
 ImageD11/sinograms/dataset.py                | 121 ++---
 6 files changed, 576 insertions(+), 373 deletions(-)
 create mode 100755 ImageD11/frelon_peaksearch.py
 delete mode 100755 ImageD11/nbGui/TDXRD/frelon_peaksearch.py

diff --git a/ImageD11/blobcorrector.py b/ImageD11/blobcorrector.py
index 23192832..df4c62db 100644
--- a/ImageD11/blobcorrector.py
+++ b/ImageD11/blobcorrector.py
@@ -62,6 +62,8 @@ def __init__(self, argsplinefile, orientation="edf"):
         """
         Argument is the name of a fit2d spline file
         """
+        import warnings
+        warnings.warn("For new data from ID11, better to use the dx,dy files instead of Fit2d spline", DeprecationWarning)
         self.splinefile = argsplinefile
         self.tolerance = 1e-5
         self.orientation = orientation
@@ -341,12 +343,19 @@ def correct_cf_with_spline(cf, spline_file):
     """Creates a correctorclass from the spline file
     Corrects the columnfile with the spline file
     Returns the corrected columnfile"""
-
     corrector = correctorclass(spline_file)
     corrector.correct_px_lut(cf)
-
     return cf
 
+
+def correct_cf_with_dxdyfiles(cf, dxfile, dyfile):
+    """Corrects the columnfile with the dx/dy files
+    Returns the corrected columnfile"""
+    es = eiger_spatial( dxfile, dyfile )
+    pkin = { 's_raw': cf['s_raw'], 'f_raw': cf['f_raw'] }
+    pkout = es( pkin )
+    cf.addcolumn( pkout['sc'], 'sc' )
+    cf.addcolumn( pkout['fc'], 'fc' )
+    return cf
 
 #
 #"""
diff --git a/ImageD11/frelon_peaksearch.py b/ImageD11/frelon_peaksearch.py
new file mode 100755
index 00000000..5cae1031
--- /dev/null
+++ b/ImageD11/frelon_peaksearch.py
@@ -0,0 +1,418 @@
+import h5py
+import numpy as np
+import scipy
+
+import fabio
+import functools
+import ImageD11.cImageD11
+
+import multiprocessing
+from ImageD11.columnfile import columnfile
+from tqdm.contrib.concurrent import process_map
+
+from tqdm.auto import tqdm
+
+# we will now define our 2D segmenter and 3D merging functions
+# (this code now lives in ImageD11 proper; the notebooks import it from here)
+# there are some important parameters that we need to ensure are correctly selected for good segmentation
+# these are the m_offset_thresh and m_ratio_thresh arguments of the "worker" class
+# (in __init__ they build the self.m_offset and self.m_ratio masks from the background image)
+# these determine which part of the detector image is treated as the background, and which is treated as peaks
+# these parameters may need adjusting when going from undeformed to deformed data
+# to adjust them, change the numbers, then re-run the segmentation on a single frame and plot the result
+# a good choice of numbers will mean we exclude regions between rings and powder background on the rings
+# therefore only peaks created by grains should survive
+
+
+def do3dmerge(cf_2d_dict, n, omega, radius=1.6):
+    """
+    Merges the peaks in cf_2d_dict if their centers are within radius pixels
+    n = the number of peaks on each frame in cf_2d_dict
+    omega = the rotation motor position for each frame
+
+    returns cf_3d_dict = the merged peaks; also adds a "spot3d_id" column
+    to the input cf_2d_dict in place
+    """
+    s = cf_2d_dict["s_raw"]
+    f = cf_2d_dict["f_raw"]
+    o = cf_2d_dict["o_raw"]
+    I = cf_2d_dict["s_I"]
+    s1 = cf_2d_dict["s_1"]
+
+    print("Making 3D merge")
+
+    # pointers to frames in s,f,o,I,n,s1
+    p = np.cumsum(
+        np.concatenate(
+            (
+                [
+                    0,
+                ],
+                n,
+            )
+        )
+    )
+    # make a KDTree for each frame (wastes a bit of memory, but easier for sorting later)
+    trees = [
+        scipy.spatial.cKDTree(np.transpose((s[p[i] : p[i + 1]], f[p[i] : p[i + 1]])))
+        for i in range(len(n))
+    ]
+    print("made trees")
+    # sys.stdout.flush()
+    # because interlaced might not be in order
+    order = np.argsort(omega % 360)
+    # peaks that overlap, k : 0 -> npks == len(s|f|o)
+    # diagonal
+    krow = list(range(len(o)))
+    kcol = list(range(len(o)))
+    for i in range(1, len(n)):  # match these to previous
+        flo = order[i - 1]
+        fhi = order[i]
+        tlo = trees[flo]
+        thi = trees[fhi]
+        # radius (default 1.6) is how close centers should be to overlap
+        lol = tlo.query_ball_tree(thi, r=radius)
+        for srcpk, destpks in enumerate(lol):  # dest is strictly higher than src
+            for destpk in destpks:
+                krow.append(srcpk + p[flo])
+                kcol.append(destpk + p[fhi])
+    csr = scipy.sparse.csr_matrix(
+        (np.ones(len(krow), dtype=bool), (kcol, krow)), shape=(len(o), len(o))
+    )
+    # connected components == find all overlapping peaks
+    ncomp, labels = scipy.sparse.csgraph.connected_components(
+        csr, directed=False, return_labels=True
+    )
+    print("connected components")
+    # sys.stdout.flush()
+    # Now merge the properties
+    npkmerged = np.bincount(labels, minlength=ncomp)  # number of peaks that were merged
+    s3d1 = np.bincount(labels, minlength=ncomp, weights=s1)  # s_1
+    s3dI = np.bincount(labels, minlength=ncomp, weights=I)  # s_I
+    ssI = np.bincount(labels, minlength=ncomp, weights=I * s)  # s_sI
+    sfI = np.bincount(labels, minlength=ncomp, weights=I * f)  # s_fI
+    soI = np.bincount(labels, minlength=ncomp, weights=I * o)  # s_oI
+    s3d = ssI / s3dI
+    f3d = sfI / s3dI
+    o3d = soI / s3dI
+
+    cf_3d_dict = {
+        "s_raw": s3d,
+        "f_raw": f3d,
+        "omega": o3d,
+        "sum_intensity": s3dI,
+        "Number_of_pixels": s3d1,
+    }
+
+    cf_2d_dict["spot3d_id"] = labels
+    return cf_3d_dict
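+
+
+# Example of the merge rule above (hypothetical numbers): a 2D peak centred at
+# (s, f) = (100.0, 200.0) on one frame and one centred at (100.5, 200.5) on the
+# next frame in omega are about 0.7 px apart, within radius=1.6, so they join
+# the same connected component and are merged into one intensity-weighted 3D peak.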
+
+
+class worker:
+    """subtracts background, custom for ma4750"""
+
+    def __init__(
+        self,
+        bgfile,
+        maskfile,
+        threshold=50,
+        smoothsigma=1.0,
+        bgc=0.9,
+        minpx=3,
+        m_offset_thresh=100,
+        m_ratio_thresh=135,
+        flatfile=None,
+        darkfile=None,
+    ):
+        """
+        bgfile = image containing the background
+        maskfile = detector mask
+
+        threshold = ADU below which to zero out image
+        smoothsigma = sigma for Gaussian filter before labelling
+        bgc = fractional part of bg per peak to remove
+        minpx = minimum number of pixels in a peak
+        m_offset_thresh = bgfile less than this is constant
+        m_ratio_thresh = bgfile greater than this is scaled
+
+        flatfile = detector sensitivity image
+        darkfile = detector offset image
+        """
+        self.flat = None
+        self.dark = None
+        self.bg = None
+        self.mask = None
+
+        if flatfile is not None:
+            if isinstance(flatfile, np.ndarray):
+                self.flat = flatfile
+            else:
+                self.flat = fabio.open(flatfile).data
+
+        if darkfile is not None:
+            if isinstance(darkfile, np.ndarray):
+                self.dark = darkfile
+            else:
+                self.dark = fabio.open(darkfile).data
+
+        if bgfile is not None:
+            if isinstance(bgfile, np.ndarray):
+                self.bg = bgfile
+            else:
+                self.bg = self.correct(fabio.open(bgfile).data)
+
+        if maskfile is not None:
+            if isinstance(maskfile, np.ndarray):
+                self.mask = maskfile
+            else:
+                self.mask = fabio.open(maskfile).data
+
+        self.threshold = threshold  # was 50 # ADU to zero out image
+        self.smoothsigma = smoothsigma  # sigma for Gaussian before labelling
+        self.bgc = bgc  # fractional part of bg per peak to remove
+        self.minpx = minpx
+
+        if self.bg is not None:
+            self.m_offset = self.bg < m_offset_thresh
+            self.m_ratio = self.bg > m_ratio_thresh
+            self.mbg = np.mean(self.bg[self.m_offset])
+            self.bg -= self.mbg  # remove dark
+            self.invbg = 1 / self.bg[self.m_ratio]
+
+        self.bins = b = np.linspace(0, 2, 256)
+        self.bc = (b[1:] + b[:-1]) / 2  # bin cens
+        self.wrk = None
+        self.labels = None
+
+    def correct(self, img):
+        """
+        Applies dark + flat if given
+        """
+        cor = img.astype(np.float32)
+        if self.dark is not None:
+            np.subtract(cor, self.dark, cor)
+        if self.flat is not None:
+            np.divide(cor, self.flat, cor)
+        return cor
+
+    def bgsub(self, img):
+        """
+        This attempts to remove a background by rescaling the self.bg image.
+        """
+        img = self.correct(img)
+        if self.bg is None:  # use image border
+            self.scale = 1
+            self.offset = np.median(
+                (img[0].min(), img[-1].min(), img[:, 0].min(), img[:, -1].min())
+            )
+            return img - self.offset
+        offset = np.mean(img[self.m_offset])  # remove dark
+        np.subtract(img, offset, img)
+        ratio = img[self.m_ratio] * self.invbg
+        h, b = np.histogram(ratio, bins=self.bins)
+        htrim = np.where(h < h.max() * 0.05, 0, h)
+        r = (htrim * self.bc).sum() / htrim.sum()
+        # Better to scale background to data rather than data to background?
+        # np.multiply(img, 1 / r, img)
+        np.subtract(img, self.bg * r, img)
+        self.offset = offset
+        self.scale = r
+        return img
+
+    def masksub(self, img):
+        # active pixels are 0, masked pixels are 1
+        img_masked = img.copy()
+        if self.mask is not None:
+            img_masked[self.mask == 1] = 0
+        return img_masked
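+
+    # Note on bgsub: the scale factor r is read off a histogram of img/bg over
+    # the bright m_ratio pixels, ignoring bins below 5% of the histogram
+    # maximum. Hypothetical example: if most ratios cluster near 0.8, then
+    # r ~ 0.8 and 0.8 * self.bg is subtracted from the frame.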
+
+    def peaksearch(self, img, omega=0):
+        if self.wrk is None:
+            self.wrk = np.empty(img.shape, "b")
+            self.labels = np.empty(img.shape, "i")
+
+        self.cor = self.bgsub(img)
+        self.cor = self.masksub(self.cor)
+        # smooth the image for labelling (removes noise maxima)
+        self.smoothed = scipy.ndimage.gaussian_filter(self.cor, self.smoothsigma)
+        assert self.smoothed.dtype == np.float32
+        # zero out the background
+        self.mt = self.smoothed < self.threshold
+        self.smoothed[self.mt] = 0
+        # label on smoothed image
+        self.npks = ImageD11.cImageD11.localmaxlabel(
+            self.smoothed, self.labels, self.wrk
+        )
+        self.labels[self.mt] = 0
+        # now find the borders of each blob : first dilate
+        l3 = scipy.ndimage.uniform_filter(self.labels * 7, 3)
+        self.borders = (self.labels * 7) != l3
+        # border properties - use the real data or the smoothed? Real data. Might be noisier
+        self.blobsouter = ImageD11.cImageD11.blobproperties(
+            self.cor, self.labels * self.borders, self.npks
+        )
+        # Computed background per peak
+        self.per_peak_bg = np.concatenate(
+            (
+                [
+                    0,
+                ],
+                self.blobsouter[:, ImageD11.cImageD11.mx_I],
+            )
+        )
+        self.bgcalc = self.per_peak_bg[self.labels]
+        self.m_top = self.cor > self.bgcalc * self.bgc
+        self.forprops = self.cor * self.m_top
+        self.blobs = ImageD11.cImageD11.blobproperties(
+            self.forprops, self.labels * self.m_top, self.npks, omega=omega
+        )
+        ImageD11.cImageD11.blob_moments(self.blobs)
+        self.enoughpx = self.blobs[:, ImageD11.cImageD11.s_1] >= self.minpx
+        self.goodpeaks = self.blobs[self.enoughpx]
+        return self.goodpeaks
+
+    def plots(self):
+        pass
+
+
+@functools.lru_cache(maxsize=1)
+def get_dset(h5name, dsetname):
+    """This avoids re-reading the dataset many times"""
+    dset = h5py.File(h5name, "r")[dsetname]
+    return dset
+
+
+def pps(arg):
+    hname, dsetname, num, omega, worker_args = arg
+    if pps.worker is None:
+        pps.worker = worker(**worker_args)
+    frm = get_dset(hname, dsetname)[num]
+    pks = pps.worker.peaksearch(frm, omega=omega)
+    return num, pks
+
+
+def guess_bg(ds, scan_number=0, start=0, step=9, n=None):
+    """
+    ds = dataset object
+    scan_number = index into ds.scans
+    start = first image to look at (default 0)
+    step is the step for going through frames
+    n = number of frames to look at (None == all)
+
+    returns median( [ frames[j::step,:,:].min() for j in range(step) ] )
+    e.g. take the minimum of every step images
+    then return the median of these
+    """
+    with h5py.File(ds.masterfile, "r") as hin:
+        dset = hin[ds.scans[scan_number]]["measurement"][ds.detector]
+        bgj = []
+        end = len(dset)
+        if n is not None:
+            end = min(end, n + start)
+        for j in range(step):  # loop over step offset
+            bg = dset[start + j]
+            for i in range(start + step, end, step):  # min for this series
+                frm = dset[i + j]
+                bg = np.where(bg > frm, frm, bg)
+            bgj.append(bg)
+    return np.median(bgj, axis=0)
+
+
+pps.worker = None
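+
+# Usage sketch for guess_bg above (mirrors the notebook; ds is an
+# ImageD11.sinograms.dataset.DataSet):
+#     bg = guess_bg(ds)
+#     fabio.edfimage.edfimage(bg).save("bg.edf")
+# The saved image (or the array itself) can then be passed as bgfile to worker.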
+
+# PKSAVE = 's_raw f_raw o_raw s_1 s_I m_ss m_ff m_oo m_sf m_so m_fo'.split()
+PKSAVE = [
+    "s_1",
+    "s_I",
+    "s_I2",
+    "s_fI",
+    "s_ffI",
+    "s_sI",
+    "s_ssI",
+    "s_sfI",
+    "s_oI",
+    "s_ooI",
+    "s_soI",
+    "s_foI",
+    "mx_I",
+    "mx_I_f",
+    "mx_I_s",
+    "mx_I_o",
+    "bb_mx_f",
+    "bb_mx_s",
+    "bb_mx_o",
+    "bb_mn_f",
+    "bb_mn_s",
+    "bb_mn_o",
+    "avg_i",
+    "f_raw",
+    "s_raw",
+    "o_raw",
+    "m_ss",
+    "m_ff",
+    "m_oo",
+    "m_sf",
+    "m_so",
+    "m_fo",
+]
+PKCOL = [getattr(ImageD11.cImageD11, p) for p in PKSAVE]
+
+
+def process(ds, worker_args, ncpu=None):
+    """
+    Runs over the first scan in a dataset in parallel
+
+    ds = ImageD11.sinograms.dataset
+    worker_args = see worker.__init__.__doc__
+    ncpu = cores to use (None = guess)
+
+    returns cf_2d, cf_3d
+    """
+    if ncpu is None:
+        nthreads = max(1, ImageD11.cImageD11.cores_available() - 1)
+    else:
+        nthreads = ncpu
+    hname = ds.masterfile
+    scan_name = ds.scans[0]
+    frames_dset = scan_name + "/measurement/" + ds.detector
+    omega = ds.omega[0, :]
+
+    n_frames = omega.shape[0]
+
+    args = [(hname, frames_dset, i, omega[i], worker_args) for i in range(n_frames)]
+
+    all_peaks = process_map(pps, args, chunksize=1, max_workers=nthreads)
+
+    # make a dict to hold our results in
+    cf_2d_dict = {}
+
+    # populate it with empty lists
+    for title in PKSAVE:
+        cf_2d_dict[title] = []
+
+    npks = []
+
+    for peak_props in all_peaks:
+        this_index, props = peak_props
+
+        for title, prop_index in zip(PKSAVE, PKCOL):
+            cf_2d_dict[title].extend(props[:, prop_index])
+
+        npks.append(props.shape[0])
+
+    # convert to numpy arrays:
+    for key, value in cf_2d_dict.items():
+        cf_2d_dict[key] = np.array(value)
+
+    cf_3d_dict = do3dmerge(cf_2d_dict, npks, omega)
+
+    # conversion of the dict to a columnfile is a dataset method
+    cf_2d_dict["omega"] = cf_2d_dict["o_raw"]
+
+    cf_2d_dict.pop("o_raw")  # removes duplicate
+    cf_2d = ds.get_colfile_from_peaks_dict(cf_2d_dict)  # does the spatial correction
+
+    cf_3d_dict["spot3d_id"] = np.arange(len(cf_3d_dict["s_raw"]))
+    cf_3d = ds.get_colfile_from_peaks_dict(cf_3d_dict)
+
+    return cf_2d, cf_3d
diff --git a/ImageD11/nbGui/TDXRD/0_3DXRD_segment_frelon.ipynb b/ImageD11/nbGui/TDXRD/0_3DXRD_segment_frelon.ipynb
index 2e77f7cf..d9e9d5db 100755
--- a/ImageD11/nbGui/TDXRD/0_3DXRD_segment_frelon.ipynb
+++ b/ImageD11/nbGui/TDXRD/0_3DXRD_segment_frelon.ipynb
@@ -35,7 +35,9 @@
    "cell_type": "code",
    "execution_count": null,
    "id": "2402147c-5513-4907-8ca9-76e3e252df0c",
-   "metadata": {},
+   "metadata": {
+    "tags": []
+   },
    "outputs": [],
    "source": [
     "exec(open('/data/id11/nanoscope/install_ImageD11_from_git.py').read())\n",
@@ -73,31 +75,33 @@
     "from ImageD11.nbGui import nb_utils as utils\n",
     "from ImageD11.nbGui import segmenter_gui\n",
     "\n",
-    "from frelon_peaksearch import worker, process\n",
-    "\n",
-    "from ImageD11.blobcorrector import correct_cf_with_spline\n",
+    "from ImageD11.frelon_peaksearch import worker, process, guess_bg\n",
     "\n",
     "\n",
     "# Experts : update these files for your detector if you need to\n",
     "\n",
-    "splinefile = '/data/id11/inhouse1/ewoks/detectors/files/Frelon2k_C36/frelon36.spline'\n",
-    "bgfile = \"/data/visitor/ihma439/id11/20231211/PROCESSED_DATA/FeAu_0p5_tR/tdxrd_all/ff_bkg.edf\"\n",
-    "maskfile = '/data/id11/inhouse1/ewoks/detectors/files/Frelon2k_C36/mask.edf'\n",
+    "# give dx/dy as tuple instead of spline\n",
+    "# Since 2024: there is no good spline for a detector at ID11.\n",
+    "# splinefile = ('/data/id11/3dxrd/inhouse/Frelon36/frelon36_spline_20240604_dx.edf','/data/id11/3dxrd/inhouse/Frelon36/frelon36_spline_20240604_dy.edf')\n",
+    "splinefile = ('/data/id11/3dxrd/inhouse/Frelon36/frelon36_spline_20240604_ftm1x1_dx.edf','/data/id11/3dxrd/inhouse/Frelon36/frelon36_spline_20240604_ftm1x1_dy.edf')\n",
+    "bgfile = None # 'bg.edf'\n",
+    "maskfile = '/data/visitor/xa8/id11/20240917/PROCESSED_DATA/setup/ftm_mask.edf' # '/data/id11/inhouse1/ewoks/detectors/files/Frelon2k_C36/mask.edf'\n",
+    "\n",
+    "detector = \"frelon6\" # fixme - guess this from masterfile + scan\n",
+    "omegamotor = \"frot\"\n",
+    "dtymotor = \"diffy\"\n",
     "\n",
-    "detector = \"frelon3\"\n",
-    "omegamotor = \"diffrz\"\n",
-    "dtymotor = \"diffty\"\n",
     "\n",
     "#Define the initial parameters\n",
     "options = {\n",
     "    \"bgfile\":bgfile,\n",
     "    \"maskfile\":maskfile,\n",
-    "    \"threshold\":50,\n",
+    "    \"threshold\":500,\n",
     "    \"smoothsigma\":1.0,\n",
     "    \"bgc\":0.9,\n",
     "    \"minpx\":3,\n",
-    "    \"m_offset_thresh\":80,\n",
-    "    \"m_ratio_thresh\":135,\n",
+    "    \"m_offset_thresh\":100,\n",
+    "    \"m_ratio_thresh\":150,\n",
     "}"
    ]
   },
@@ -142,7 +146,7 @@
    "outputs": [],
    "source": [
     "# USER: Decide which sample\n",
-    "sample = 'FeAu_0p5_tR'"
+    "sample = 'S01_Pure_Al_After_PDF'"
    ]
   },
@@ -168,7 +172,7 @@
    "outputs": [],
    "source": [
     "# USER: Decide which dataset\n",
-    "dataset = \"ff1\""
+    "dataset = \"3dxrd_rt_04\""
    ]
   },
@@ -190,7 +194,10 @@
     "                                omegamotor=omegamotor,\n",
     "                                dtymotor=dtymotor)\n",
     "ds.import_all(scans=[\"1.1\"])\n",
-    "ds.splinefile = splinefile\n",
+    "if len(splinefile) == 1:\n",
+    "    ds.splinefile = splinefile\n",
+    "else:\n",
+    "    ds.e2dxfile, ds.e2dyfile = splinefile\n",
     "ds.maskfile = maskfile\n",
     "ds.bgfile = bgfile\n",
     "ds.save()"
    ]
   },
@@ -199,105 +206,84 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "68b22a6a-9325-40f4-af9d-945c0187ffae",
+   "id": "8822b96c-a33b-4bf2-9d95-e42d6d90e55b",
    "metadata": {
     "tags": []
    },
    "outputs": [],
    "source": [
-    "ui = segmenter_gui.FrelonSegmenterGui(ds, worker, process, **options)"
+    "bg = guess_bg( ds )\n",
+    "plt.imshow(bg)\n",
+    "fabio.edfimage.edfimage(bg).save('bg.edf')\n",
+    "plt.colorbar()"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "eee00548-3a48-44d0-b4ad-e71b71de95ca",
+   "id": "051901fc-e8a6-455e-9418-17823c6b222e",
    "metadata": {
     "tags": []
    },
    "outputs": [],
    "source": [
-    "options = ui.getopts()"
+    "%matplotlib widget"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "5a1795a9-16eb-430d-a246-a26b12c35e77",
+   "id": "68b22a6a-9325-40f4-af9d-945c0187ffae",
    "metadata": {
     "tags": []
    },
    "outputs": [],
    "source": [
-    "# now we run the segmenter on all our data\n",
-    "\n",
-    "nthreads = len(os.sched_getaffinity(os.getpid()))\n",
-    "\n",
-    "cf_2d, cf_3d = process(ds, nthreads-1, options)"
+    "ui = segmenter_gui.FrelonSegmenterGui(ds, worker, process, **options)"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "daccc72e-0aae-4332-80b0-e9ed894056e9",
+   "id": "eee00548-3a48-44d0-b4ad-e71b71de95ca",
    "metadata": {
     "tags": []
    },
    "outputs": [],
    "source": [
-    "# we can use this to verify that the 3D merging is behaving as expected\n",
-    "# don't worry about this too much!\n",
-    "\n",
-    "# take a few 3d peaks with the most 2d peaks, plot them\n",
-    "\n",
-    "unique, counts = np.unique(cf_2d.spot3d_id, return_counts=True)\n",
-    "hits_dict = dict(zip(unique, counts))\n",
-    "hits_dict_max = sorted(hits_dict.items(), key=lambda x: x[1], reverse=True)\n",
-    "\n",
-    "m = np.isin(cf_3d.index, [spot3d_id for spot3d_id, count in hits_dict_max[500:501]])\n",
-    "cf_3d_single_peak = cf_3d.copy()\n",
-    "cf_3d_single_peak.filter(m)\n",
-    "\n",
-    "peak_2d_mask = np.isin(cf_2d.spot3d_id, cf_3d_single_peak.index)\n",
-    "cf_2d_peaks = cf_2d.copy()\n",
-    "cf_2d_peaks.filter(peak_2d_mask)\n",
-    "\n",
-    "fig, ax = plt.subplots()\n",
-    "ax.scatter(cf_3d_single_peak.f_raw, cf_3d_single_peak.s_raw, marker=\"X\", c=cf_3d_single_peak.omega, s=50, label='Merged 3D peak')\n",
-    "cols = ax.scatter(cf_2d_peaks.f_raw, cf_2d_peaks.s_raw, c=cf_2d_peaks.o_raw, s=cf_2d_peaks.s_I / 1000, label='Contibutory 2D peaks')\n",
-    "fig.colorbar(cols)\n",
-    "ax.set_xlim(0, 2048)\n",
-    "ax.set_ylim(0, 2048)\n",
-    "ax.invert_yaxis()\n",
-    "ax.legend()\n",
-    "ax.set_title(\"Color is omega of peak. Scaled by sum intensity\")\n",
-    "ax.set_xlabel(\"f_raw\")\n",
-    "ax.set_ylabel(\"s_raw\")\n",
-    "plt.show()"
+    "options = ui.getopts()\n",
+    "print(options)"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "88fe5554-8b0a-4c17-a2a1-02536e476c42",
+   "id": "5a1795a9-16eb-430d-a246-a26b12c35e77",
    "metadata": {
     "tags": []
    },
    "outputs": [],
    "source": [
-    "cf_2d = correct_cf_with_spline(cf_2d, splinefile)"
+    "# now we run the segmenter on all our data\n",
+    "\n",
+    "cf_2d, cf_3d = process(ds, options)"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "e10d2350-e9f6-419a-a69d-07b575a9e8ae",
+   "id": "0fa07e53-93f4-4ce9-b0e5-1da5e6a5d511",
    "metadata": {
     "tags": []
    },
    "outputs": [],
    "source": [
-    "cf_3d = correct_cf_with_spline(cf_3d, splinefile)"
+    "# display some peaks\n",
+    "f,a=plt.subplots(1,2,figsize=(12,6))\n",
+    "a[0].plot(cf_3d.f_raw,cf_3d.s_raw,'.',ms=1)\n",
+    "a[0].set(xlabel='fast index', ylabel='slow index',aspect='equal', title='peaks on detector')\n",
+    "a[1].plot(cf_3d.omega,cf_3d.sum_intensity,'.',ms=1)\n",
+    "a[1].set(xlabel='omega',ylabel='sum intensity',yscale='log',title='peaks vs omega');"
    ]
   },
@@ -309,7 +295,9 @@
    },
    "outputs": [],
    "source": [
-    "parfile = os.path.join(ds.analysisroot, 'pars_tdxrd.json')\n",
+    "# frequent problem here: we do not have parameters at this point in the project. This comes later.\n",
+    "# parfile = os.path.join(ds.analysisroot, 'pars_tdxrd.json')\n",
+    "parfile = os.path.join(ds.analysisroot, 'Al.par')\n",
     "ds.parfile = parfile"
    ]
   },
@@ -322,8 +310,8 @@
    },
    "outputs": [],
    "source": [
-    "ds.update_colfile_pars(cf_2d, phase_name='Fe')\n",
-    "ds.update_colfile_pars(cf_3d, phase_name='Fe')"
+    "ds.update_colfile_pars(cf_2d) # phase is not needed as unitcell is not used in here\n",
+    "ds.update_colfile_pars(cf_3d)"
    ]
   },
@@ -339,6 +327,18 @@
     "ImageD11.columnfile.colfile_to_hdf(cf_3d, ds.col3dfile)"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "1354aa5d-8924-43cf-b163-21bdb18e860e",
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
+   "source": [
+    "ds.col2dfile"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -387,8 +387,6 @@
     "# manual override:\n",
     "# samples_dict = {\"FeAu_0p5_tR\": [\"ff1\", \"ff2\"]}\n",
     "\n",
-    "nthreads = len(os.sched_getaffinity(os.getpid()))\n",
-    "\n",
     "for sample, datasets in samples_dict.items():\n",
     "    for dataset in datasets:\n",
     "        print(f\"Processing dataset {dataset} in sample {sample}\")\n",
@@ -407,22 +405,22 @@
     "        \n",
     "        ds.import_all(scans=[\"1.1\"])\n",
     "        print(f\"I have a DataSet {ds.dset} in sample {ds.sample}\")\n",
-    "        ds.save()\n",
+    "        if len(splinefile) == 1:\n",
+    "            ds.splinefile = splinefile\n",
+    "        else:\n",
+    "            ds.e2dxfile, ds.e2dyfile = splinefile\n",
     "        \n",
     "        ds.splinefile = splinefile\n",
     "        ds.maskfile = maskfile\n",
     "        ds.bgfile = bgfile\n",
     "        ds.parfile = parfile\n",
-    "\n",
-    "        print(\"Peaksearching\")\n",
-    "        cf_2d, cf_3d = process(ds, nthreads-1, options)\n",
+    "        ds.save()\n",
     "        \n",
-    "        print(\"Spatially correcting peaks\")\n",
-    "        cf_2d = correct_cf_with_spline(cf_2d, splinefile)\n",
-    "        cf_3d = correct_cf_with_spline(cf_3d, splinefile)\n",
+    "        print(\"Peaksearching\")\n",
+    "        cf_2d, cf_3d = process(ds, options)\n",
     "        \n",
-    "        ds.update_colfile_pars(cf_2d, phase_name='Fe')\n",
-    "        ds.update_colfile_pars(cf_3d, phase_name='Fe')\n",
+    "        ds.update_colfile_pars( cf_2d )\n",
+    "        ds.update_colfile_pars( cf_3d )\n",
     "\n",
     "        print(\"Saving peaks to file\")\n",
     "        ImageD11.columnfile.colfile_to_hdf(cf_2d, ds.col2dfile)\n",
diff --git a/ImageD11/nbGui/TDXRD/frelon_peaksearch.py b/ImageD11/nbGui/TDXRD/frelon_peaksearch.py
deleted file mode 100755
index 65ca5f24..00000000
--- a/ImageD11/nbGui/TDXRD/frelon_peaksearch.py
+++ /dev/null
@@ -1,239 +0,0 @@
-import h5py
-import numpy as np
-import scipy
-
-import fabio
-import functools
-import ImageD11.cImageD11
-
-import multiprocessing
-from ImageD11.columnfile import columnfile
-from tqdm.contrib.concurrent import process_map
-
-from tqdm.auto import tqdm
-
-# we will now define our 2D segmenter and 3D merging functions
-# this will eventually make its way into ImageD11 properly
-# there are some important parameters that we need to ensure are correctly selected for good segmentation
-# these are the following lines:
-# self.m_offset = self.bg < 80
-# self.m_ratio = self.bg > 200
-# both in the "bgsub" function of the "worker" class
-# these determine which part of the detector image is treated as the background, and which is treated as peaks
-# these parameters may need adjusting when going from undeformed to deformed data
-# to adjust them, change the numbers, then run the three following cells to plot the results of the segmentation on a single frame
-# a good choice of numbers will mean we exclude regions between rings and powder background on the rings
-# therefore only peaks created by grains should survive
-
-def do3dmerge(cf_2d_dict, n, omega):
-    s = cf_2d_dict["s_raw"]
-    f = cf_2d_dict["f_raw"]
-    o = cf_2d_dict["o_raw"]
-    I = cf_2d_dict["s_I"]
-    s1 = cf_2d_dict["s_1"]
-
-    print('Making 3D merge')
-
-    # pointers to frames in s,f,o,I,n,s1
-    p = np.cumsum(np.concatenate(([0, ], n)))
-    # make a KDTree for each frame (wastes a bit of memory, but easier for sorting later)
-    trees = [scipy.spatial.cKDTree(np.transpose((s[p[i]:p[i + 1]], f[p[i]:p[i + 1]])))
-             for i in range(len(n))]
-    print('made trees')
-    # sys.stdout.flush()
-    # because interlaced might not be in order
-    order = np.argsort(omega % 360)
-    # peaks that overlap, k : 0 -> npks == len(s|f|o)
-    # diagonal
-    krow = list(range(len(o)))
-    kcol = list(range(len(o)))
-    for i in range(1, len(n)):  # match these to previous
-        flo = order[i - 1]
-        fhi = order[i]
-        tlo = trees[flo]
-        thi = trees[fhi]
-        # 1.6 is how close centers should be to overlap
-        lol = trees[flo].query_ball_tree(trees[fhi], r=1.6)
-        for srcpk, destpks in enumerate(lol):  # dest is strictly higher than src
-            for destpk in destpks:
-                krow.append(srcpk + p[flo])
-                kcol.append(destpk + p[fhi])
-    csr = scipy.sparse.csr_matrix((np.ones(len(krow), dtype=bool),
-                                   (kcol, krow)), shape=(len(o), len(o)))
-    # connected components == find all overlapping peaks
-    ncomp, labels = scipy.sparse.csgraph.connected_components(csr,
-                                                              directed=False, return_labels=True)
-
-    print('connected components')
-    # sys.stdout.flush()
-    # Now merge the properties
-    npkmerged = np.bincount(labels, minlength=ncomp)  # number of peaks that were merged
-    s3d1 = np.bincount(labels, minlength=ncomp, weights=s1)  # s_1
-    s3dI = np.bincount(labels, minlength=ncomp, weights=I)  # s_I
-    ssI = np.bincount(labels, minlength=ncomp, weights=I * s)  # s_sI
-    sfI = np.bincount(labels, minlength=ncomp, weights=I * f)  # s_sI
-    soI = np.bincount(labels, minlength=ncomp, weights=I * o)  # s_sI
-    s3d = ssI / s3dI
-    f3d = sfI / s3dI
-    o3d = soI / s3dI
-
-    cf_3d_dict = {
-        "s_raw": s3d,
-        "f_raw": f3d,
-        "omega": o3d,
-        "sum_intensity": s3dI,
-        "Number_of_pixels": s3d1
-    }
-
-    spot3d_id = labels
-
-    return cf_3d_dict, spot3d_id
-
-class worker:
-    """ subtracts background, custom for ma4750 """
-
-    def __init__(self, bgfile, maskfile, threshold=50, smoothsigma=1., bgc=0.9, minpx=3, m_offset_thresh=80, m_ratio_thresh=135):
-        self.bg = fabio.open(bgfile).data
-        self.mask = fabio.open(maskfile).data
-        self.threshold = threshold  # was 50 # ADU to zero out image
-        self.smoothsigma = smoothsigma  # sigma for Gaussian before labelleing
-        self.bgc = bgc  # fractional part of bg per peak to remove
-        self.minpx = minpx
-
-        self.m_offset = self.bg < m_offset_thresh
-
-        self.mbg = np.mean(self.bg[self.m_offset])
-
-        self.m_ratio = self.bg > m_ratio_thresh
-
-        self.bg -= self.mbg  # remove dark
-        self.invbg = 1 / self.bg[self.m_ratio]
-        self.bins = b = np.linspace(0, 2, 256)
-        self.bc = (b[1:] + b[:-1]) / 2
-
-        self.wrk = np.empty(self.bg.shape, 'b')
-        self.labels = np.empty(self.bg.shape, 'i')
-
-    def bgsub(self, img):
-        img = img.astype(np.float32)
-        offset = np.mean(img[self.m_offset])  # remove dark
-        np.subtract(img, offset, img)
-        ratio = img[self.m_ratio] * self.invbg
-        h, b = np.histogram(ratio, bins=self.bins)
-        htrim = np.where(h < h.max() * 0.05, 0, h)
-        r = (htrim * self.bc).sum() / htrim.sum()
-        # Better to scale background to data rather than data to background?
-        # np.multiply(img, 1 / r, img)
-        np.subtract(img, self.bg * r, img)
-        self.offset = offset
-        self.scale = r
-        return img
-
-    def masksub(self, img):
-        # active pixels are 0, masked pixels are 1
-        img_masked = img.copy()
-        img_masked[self.mask == 1] = 0
-        return img_masked
-
-    def peaksearch(self, img, omega=0):
-        self.cor = self.bgsub(img)
-        self.cor = self.masksub(self.cor)
-        # smooth the image for labelling (removes noise maxima)
-        self.smoothed = scipy.ndimage.gaussian_filter(self.cor, self.smoothsigma)
-        assert self.smoothed.dtype == np.float32
-        # zero out the background
-        self.mt = self.smoothed < self.threshold
-        self.smoothed[self.mt] = 0
-        # label on smoothed image
-        self.npks = ImageD11.cImageD11.localmaxlabel(self.smoothed, self.labels, self.wrk)
-        self.labels[self.mt] = 0
-        # now find the borders of each blob : first dilate
-        l3 = scipy.ndimage.uniform_filter(self.labels * 7, 3)
-        self.borders = (self.labels * 7) != l3
-        # border properties - use the real data or the smoothed? Real data. Might be noisier
-        self.blobsouter = ImageD11.cImageD11.blobproperties(self.cor, self.labels * self.borders, self.npks)
-        # Computed background per peak
-        self.per_peak_bg = np.concatenate(([0, ], self.blobsouter[:, ImageD11.cImageD11.mx_I]))
-        self.bgcalc = self.per_peak_bg[self.labels]
-        self.m_top = self.cor > self.bgcalc * self.bgc
-        self.forprops = self.cor * self.m_top
-        self.blobs = ImageD11.cImageD11.blobproperties(self.forprops,
-                                                       self.labels * self.m_top,
-                                                       self.npks,
-                                                       omega=omega)
-        ImageD11.cImageD11.blob_moments(self.blobs)
-        self.enoughpx = self.blobs[:, ImageD11.cImageD11.s_1] >= self.minpx
-        self.goodpeaks = self.blobs[self.enoughpx]
-        return self.goodpeaks
-
-    def plots(self):
-        pass
-
-
-@functools.lru_cache(maxsize=1)
-def get_dset(h5name, dsetname):
-    """This avoids to re-read the dataset many times"""
-    dset = h5py.File(h5name, "r")[dsetname]
-    return dset
-
-
-def pps(arg):
-    hname, dsetname, num, omega, worker_args = arg
-    if pps.worker is None:
-        pps.worker = worker(**worker_args)
-    frm = get_dset(hname, dsetname)[num]
-    pks = pps.worker.peaksearch(frm, omega=omega)
-    return num, pks
-
-
-pps.worker = None
-
-# PKSAVE = 's_raw f_raw o_raw s_1 s_I m_ss m_ff m_oo m_sf m_so m_fo'.split()
-PKSAVE = ["s_1", "s_I", "s_I2", "s_fI", "s_ffI", "s_sI", "s_ssI", "s_sfI", "s_oI", "s_ooI", "s_soI", "s_foI", "mx_I", "mx_I_f", "mx_I_s", "mx_I_o", "bb_mx_f", "bb_mx_s", "bb_mx_o", "bb_mn_f", "bb_mn_s", "bb_mn_o", "avg_i", "f_raw", "s_raw", "o_raw", "m_ss", "m_ff", "m_oo", "m_sf", "m_so", "m_fo"]
-PKCOL = [getattr(ImageD11.cImageD11, p) for p in PKSAVE]
-
-def process(ds, ncpu, worker_args):
-    hname = ds.masterfile
-    scan_name = ds.scans[0]
-    frames_dset = scan_name + "/measurement/" + ds.detector
-    omega = ds.omega[0, :]
-
-    n_frames = omega.shape[0]
-
-    args = [(hname, frames_dset, i, omega[i], worker_args) for i in range(n_frames)]
-
-    all_peaks = process_map(pps, args, chunksize=1)
-
-    # make a dict to hold our results in
-    cf_2d_dict = {}
-
-    # populate it with empty lists
-    for title in PKSAVE:
-        cf_2d_dict[title] = []
-
-    npks = []
-
-    for peak_props in all_peaks:
-        this_index, props = peak_props
-
-        for title, prop_index in zip(PKSAVE, PKCOL):
-            cf_2d_dict[title].extend(props[:, prop_index])
-
-        npks.append(props.shape[0])
-
-    # convert to numpy arrays:
-    for key, value in cf_2d_dict.items():
-        cf_2d_dict[key] = np.array(value)
-
-    cf_3d_dict, spot3d_id = do3dmerge(cf_2d_dict, npks, omega)
-
-    cf_2d = ImageD11.columnfile.colfile_from_dict(cf_2d_dict)
-
-    cf_2d.addcolumn(spot3d_id, "spot3d_id")
-    cf_2d.addcolumn(cf_2d.o_raw, "omega")
-
-    cf_3d = ImageD11.columnfile.colfile_from_dict(cf_3d_dict)
-
-    cf_3d.addcolumn(np.arange(cf_3d.nrows), "index")
-
-    return cf_2d, cf_3d
diff --git a/ImageD11/nbGui/segmenter_gui.py b/ImageD11/nbGui/segmenter_gui.py
index 667e5368..de23b10d 100644
--- a/ImageD11/nbGui/segmenter_gui.py
+++ b/ImageD11/nbGui/segmenter_gui.py
@@ -153,12 +153,12 @@ def __init__(self, dset, worker_func, process_func, counter="_roi1", scan=None,
         self.scan = scan
         self.idx = frame
         self.chooseframe(counter)
-        thresh_slider = widgets.IntSlider(value=options["threshold"], min=1, max=100, step=1, description='Threshold:')
-        smsig_slider = widgets.FloatSlider(value=options["smoothsigma"], min=0.0, max=1.0, step=0.05, description='Smoothsigma:')
+        thresh_slider = widgets.IntSlider(value=options["threshold"], min=0, max=10000, step=1, description='Threshold:')
+        smsig_slider = widgets.FloatSlider(value=options["smoothsigma"], min=0.0, max=3.0, step=0.05, description='Smoothsigma:')
         bgc_slider = widgets.FloatSlider(value=options["bgc"], min=0.0, max=1.0, step=0.05, description='bgc:')
-        minpx_slider = widgets.IntSlider(value=options["minpx"], min=1, max=5, step=1, description='minpx:')
-        mofft_slider = widgets.IntSlider(value=options["m_offset_thresh"], min=1, max=200, step=1, description='m_offset_thresh:')
-        mratt_slider = widgets.IntSlider(value=options["m_ratio_thresh"], min=1, max=200, step=1, description='m_ratio_thresh:')
+        minpx_slider = widgets.IntSlider(value=options["minpx"], min=0, max=50, step=1, description='minpx:')
+        mofft_slider = widgets.IntSlider(value=options["m_offset_thresh"], min=0, max=200, step=1, description='m_offset_thresh:')
+        mratt_slider = widgets.IntSlider(value=options["m_ratio_thresh"], min=0, max=2000, step=1, description='m_ratio_thresh:')
         self.widget = widgets.interactive(self.update_image,
                                           threshold=thresh_slider,
                                           smoothsigma=smsig_slider,
@@ -196,7 +196,7 @@ def segment_frame(self):
         return image_worker, fc, sc, len(fc)
 
     def display(self):
-        self.fig, self.axs = plt.subplots(1, 3, sharex=True, sharey=True, figsize=(22, 10), layout="constrained")
+        self.fig, self.axs = plt.subplots(1, 3, sharex=True, sharey=True, figsize=(12, 8), layout="constrained")
         self.im1 = self.axs[0].imshow(self.raw_image, norm='log', vmin=100, vmax=1000)
         self.axs[0].set_title("Original image")
         self.im2 = self.axs[1].imshow(self.smoothed_image, cmap="viridis", norm='log', vmin=0.5, vmax=1000, interpolation="nearest")
diff --git a/ImageD11/sinograms/dataset.py b/ImageD11/sinograms/dataset.py
index 5621b6d5..e640221b 100644
--- a/ImageD11/sinograms/dataset.py
+++ b/ImageD11/sinograms/dataset.py
@@ -7,7 +7,7 @@
 import ImageD11.grain
 import ImageD11.unitcell
 import ImageD11.sinograms.properties
-from ImageD11.blobcorrector import eiger_spatial
+from ImageD11.blobcorrector import correct_cf_with_dxdyfiles, correct_cf_with_spline
 from ImageD11.columnfile import colfile_from_dict
 
 """
@@ -75,19 +75,27 @@ class DataSet:
     )
     STRINGLISTS = ("scans", "imagefiles", "sparsefiles")
     # sinograms
-    NDNAMES = ("omega", "dty", "nnz", "frames_per_file", "nlm", "frames_per_scan", "monitor")
+    NDNAMES = (
+        "omega",
+        "dty",
+        "nnz",
+        "frames_per_file",
+        "nlm",
+        "frames_per_scan",
+        "monitor",
+    )
 
     def __init__(
-            self,
-            dataroot=".",
-            analysisroot=".",
-            sample="sample",
-            dset="dataset",
-            detector="eiger",
-            omegamotor="rot_center",
-            dtymotor="dty",
-            filename=None,
-            analysispath=None
+        self,
+        dataroot=".",
+        analysisroot=".",
+        sample="sample",
+        dset="dataset",
+        detector="eiger",
+        omegamotor="rot_center",
+        dtymotor="dty",
+        filename=None,
+        analysispath=None,
     ):
         """The things we need to know to process data"""
 
@@ -139,7 +147,9 @@ def __init__(
     def update_paths(self, force=False):
         # paths for processed data
         # root of analysis for this dataset for this sample:
-        self.analysispath_default = os.path.join(self.analysisroot, self.sample, self.dsname)
+        self.analysispath_default = os.path.join(
+            self.analysisroot, self.sample, self.dsname
+        )
         if self.analysispath is None:
             self.analysispath = self.analysispath_default
 
@@ -266,10 +276,10 @@ def import_scans(self, scans=None, hname=None):
                 scan
                 for scan in list(hin["/"])
                 if (
-                        scan.endswith(".1")
-                        and ("measurement" in hin[scan])
-                        and (self.detector in hin[scan]["measurement"])
-                        and (self.omegamotor in hin[scan]["measurement"])
+                    scan.endswith(".1")
+                    and ("measurement" in hin[scan])
+                    and (self.detector in hin[scan]["measurement"])
+                    and (self.omegamotor in hin[scan]["measurement"])
                 )
             ]
             goodscans = []
@@ -305,7 +315,7 @@ def import_imagefiles(self):
             bad = []
             for i, scan in enumerate(self.scans):
                 if ("measurement" not in hin[scan]) or (
-                        self.detector not in hin[scan]["measurement"]
+                    self.detector not in hin[scan]["measurement"]
                 ):
                     print("Bad scan", scan)
                     bad.append(scan)
@@ -323,8 +333,9 @@ def import_imagefiles(self):
                 assert self.limapath == vsrc.dset_name
         self.frames_per_file = np.array(self.frames_per_file, int)
         self.sparsefiles = [
-            os.path.join('sparsefiles',
-                         name.replace("/", "_").replace(".h5", "_sparse.h5"))
+            os.path.join(
+                "sparsefiles", name.replace("/", "_").replace(".h5", "_sparse.h5")
+            )
             for name in self.imagefiles
         ]
         logging.info("imported %d lima filenames" % (np.sum(self.frames_per_file)))
@@ -337,11 +348,11 @@ def import_motors_from_master(self):
         """
         # self.guess_motornames()
         self.omega = [
-                         None,
-                     ] * len(self.scans)
+            None,
+        ] * len(self.scans)
         self.dty = [
-                       None,
-                   ] * len(self.scans)
+            None,
+        ] * len(self.scans)
         with h5py.File(self.masterfile, "r") as hin:
             bad = []
             for i, scan in enumerate(self.scans):
@@ -496,25 +507,24 @@ def get_ring_current_per_scan(self):
                     ring_currents / np.max(ring_currents)
                 )
 
-    def get_monitor(self, name='fpico6'):
+    def get_monitor(self, name="fpico6"):
         # masterfile or sparsefile
         hname = self.masterfile
-        if hasattr(self, 'sparsefile') and os.path.exists(self.sparsefile):
+        if hasattr(self, "sparsefile") and os.path.exists(self.sparsefile):
             hname = self.sparsefile
         monitor = []
-        with h5py.File( hname, 'r') as hin:
+        with h5py.File(hname, "r") as hin:
             for scan in self.scans:
-                if scan.find('::')>-1:
-                    snum, slc = scan.split('::')
-                    lo, hi = [int(v) for v in slc[1:-1].split(':')]
-                    mon = hin[snum]['measurement'][name][lo:hi]
+                if scan.find("::") > -1:
+                    snum, slc = scan.split("::")
+                    lo, hi = [int(v) for v in slc[1:-1].split(":")]
+                    mon = hin[snum]["measurement"][name][lo:hi]
                 else:
-                    mon = hin[snum]['measurement'][name][:]
+                    mon = hin[scan]["measurement"][name][:]
                 monitor.append(mon)
-        self.monitor = np.concatenate(monitor).reshape( self.shape )
+        self.monitor = np.concatenate(monitor).reshape(self.shape)
         return self.monitor
 
-
     def guess_detector(self):
         """Guess which detector we are using from the masterfile"""
 
@@ -595,11 +605,10 @@ def sinohist(self, weights=None, omega=None, dty=None, method="fast"):
         return histo
 
     def get_phases_from_disk(self):
-        if not hasattr(self,'parfile') or self.parfile is None:
-            raise AttributeError('Need self.parfile to load phases!')
+        if not hasattr(self, "parfile") or self.parfile is None:
+            raise AttributeError("Need self.parfile to load phases!")
         return ImageD11.unitcell.Phases(self.parfile)
 
-
     @property
     def peaks_table(self):
         if self._peaks_table is None:
@@ -626,15 +635,19 @@ def get_colfile_from_peaks_dict(self, peaks_dict=None):
         Uses self.pk2d if no peaks_dict provided"""
 
         # TODO add optional peaks mask
-        # Define spatial correction
-        spat = eiger_spatial(dxfile=self.e2dxfile, dyfile=self.e2dyfile)
-
         if peaks_dict is None:
             peaks_dict = self.pk2d
 
-        # Generate columnfile from peaks table
-        cf = colfile_from_dict(spat(peaks_dict))
+        cf = colfile_from_dict(peaks_dict)
+
+        # Define spatial correction
+        if self.e2dxfile is not None:
+            cf = correct_cf_with_dxdyfiles(cf, self.e2dxfile, self.e2dyfile)
+        else:
+            if self.splinefile is not None:
+                cf = correct_cf_with_spline(cf, spline_file)
 
+        # Generate columnfile from peaks table
         return cf
 
     def update_colfile_pars(self, cf, phase_name=None):
@@ -644,13 +657,13 @@ def update_colfile_pars(self, cf, phase_name=None):
 
     def get_cf_2d(self):
         if os.path.exists(self.col2dfile):
-            print('Loading existing colfile from', self.col2dfile)
+            print("Loading existing colfile from", self.col2dfile)
             return self.get_cf_2d_from_disk()
         return self.get_colfile_from_peaks_dict()
 
     def get_cf_4d(self):
         if os.path.exists(self.col4dfile):
-            print('Loading existing colfile from', self.col4dfile)
+            print("Loading existing colfile from", self.col4dfile)
             return self.get_cf_4d_from_disk()
         return self.get_colfile_from_peaks_dict(peaks_dict=self.pk4d)
 
@@ -667,21 +680,25 @@ def get_cf_4d_from_disk(self):
         return cf_4d
 
     def get_grains_from_disk(self, phase_name=None):
-        group_name = 'grains'
+        group_name = "grains"
         if phase_name is not None:
             group_name = phase_name
-        grains = ImageD11.grain.read_grain_file_h5(self.grainsfile, group_name=group_name)
-        if phase_name is not None and hasattr(self, 'phases'):
-            print('Adding reference unitcells from self.phases')
+        grains = ImageD11.grain.read_grain_file_h5(
+            self.grainsfile, group_name=group_name
+        )
+        if phase_name is not None and hasattr(self, "phases"):
+            print("Adding reference unitcells from self.phases")
             for g in grains:
                 g.ref_unitcell = self.phases.unitcells[phase_name]
         return grains
 
     def save_grains_to_disk(self, grains, phase_name=None):
-        group_name = 'grains'
+        group_name = "grains"
         if phase_name is not None:
             group_name = phase_name
-        ImageD11.grain.write_grain_file_h5(self.grainsfile, grains, group_name=group_name)
+        ImageD11.grain.write_grain_file_h5(
+            self.grainsfile, grains, group_name=group_name
+        )
 
     def import_nnz(self):
         """Read the nnz arrays from the sparsefiles"""
@@ -825,7 +842,7 @@ def load(self, h5name=None, h5group="/"):
             if name in grp:
                 stringlist = list(grp[name][()])
                 if hasattr(stringlist[0], "decode") or isinstance(
-                        stringlist[0], np.ndarray
+                    stringlist[0], np.ndarray
                 ):
                     data = [s.decode() for s in stringlist]
                 else:
@@ -841,7 +858,7 @@ def load(self, h5name=None, h5group="/"):
         return self
 
 
-def load(h5name, h5group='/'):
+def load(h5name, h5group="/"):
     ds_obj = DataSet(filename=h5name)
     return ds_obj

From f035726036d5f24439e0aad64e933442a3c63faa Mon Sep 17 00:00:00 2001
From: Jonathan Wright
Date: Thu, 19 Sep 2024 15:59:48 +0200
Subject: [PATCH 3/3] cleanup for frelon peaksearch fix #324

---
 ImageD11/sinograms/dataset.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ImageD11/sinograms/dataset.py b/ImageD11/sinograms/dataset.py
index e640221b..53fe39c2 100644
--- a/ImageD11/sinograms/dataset.py
+++ b/ImageD11/sinograms/dataset.py
@@ -645,7 +645,7 @@ def get_colfile_from_peaks_dict(self, peaks_dict=None):
             cf = correct_cf_with_dxdyfiles(cf, self.e2dxfile, self.e2dyfile)
         else:
             if self.splinefile is not None:
-                cf = correct_cf_with_spline(cf, spline_file)
+                cf = correct_cf_with_spline(cf, self.splinefile)
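
To summarize the workflow this series enables, here is a minimal usage sketch in Python. It assumes a Frelon dataset laid out as in the notebook above; the paths, sample/dataset names, and dx/dy/mask filenames are placeholders, not part of the patches:

    import ImageD11.columnfile
    import ImageD11.sinograms.dataset
    from ImageD11.frelon_peaksearch import process, guess_bg

    # describe the experiment (placeholder names)
    ds = ImageD11.sinograms.dataset.DataSet(
        dataroot="RAW_DATA", analysisroot="PROCESSED_DATA",
        sample="sample", dset="dataset_1",
        detector="frelon6", omegamotor="frot", dtymotor="diffy",
    )
    ds.import_all(scans=["1.1"])
    ds.e2dxfile, ds.e2dyfile = "frelon_dx.edf", "frelon_dy.edf"  # dx/dy spatial files
    ds.maskfile = "mask.edf"
    ds.save()

    # estimate a background from the frames themselves, then segment everything
    bg = guess_bg(ds)
    options = {"bgfile": bg, "maskfile": ds.maskfile, "threshold": 500,
               "smoothsigma": 1.0, "bgc": 0.9, "minpx": 3,
               "m_offset_thresh": 100, "m_ratio_thresh": 150}
    cf_2d, cf_3d = process(ds, options)  # spatial correction is applied inside

    ImageD11.columnfile.colfile_to_hdf(cf_2d, ds.col2dfile)
    ImageD11.columnfile.colfile_to_hdf(cf_3d, ds.col3dfile)

Note the change of signature relative to the deleted notebook-local module: process(ds, worker_args) no longer takes an ncpu positional argument (it is now an optional keyword), and the spatial correction step no longer needs to be run by hand, since get_colfile_from_peaks_dict chooses between dx/dy files and a spline automatically.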