From 778c2329f2e51853ae8f821128b6acbf4ddbbc80 Mon Sep 17 00:00:00 2001 From: Jonathan Wright Date: Tue, 31 Oct 2023 10:36:45 +0100 Subject: [PATCH] black format + ruff lint --- ImageD11/ImageD11_file_series.py | 296 +++-- ImageD11/ImageD11_thread.py | 19 +- ImageD11/ImageD11options.py | 24 +- ImageD11/__init__.py | 9 +- ImageD11/blobcorrector.py | 198 ++- ImageD11/cImageD11.py | 34 +- ImageD11/cImageD11_docstrings.py | 4 +- ImageD11/columnfile.py | 457 ++++--- ImageD11/compute_fazit.py | 179 +-- ImageD11/correct.py | 74 +- ImageD11/eps_sig_solver.py | 352 ++--- ImageD11/fft_index_refac.py | 351 ++--- ImageD11/finite_strain.py | 108 +- ImageD11/grain.py | 168 ++- ImageD11/grid_index_parallel.py | 323 ++--- ImageD11/guicommand.py | 63 +- ImageD11/gv_general.py | 206 +-- ImageD11/indexer.py | 578 +++++---- ImageD11/indexing.py | 1128 +++++++++-------- ImageD11/labelimage.py | 272 ++-- ImageD11/lattice_reduction.py | 401 +++--- ImageD11/license.py | 1 - ImageD11/nbGui/__init__.py | 2 +- ImageD11/nbGui/fit_geometry.py | 112 +- ImageD11/nbGui/plot3d.py | 81 +- ImageD11/peakmerge.py | 329 ++--- ImageD11/peaksearcher.py | 770 ++++++----- ImageD11/project/h5demo1.py | 105 +- .../make_h5_project_fails_no_external.py | 34 +- ImageD11/project/project.py | 157 +-- ImageD11/project/project_yaml.py | 39 +- ImageD11/project/projects.py | 3 +- ImageD11/project/test_json.py | 1 - ImageD11/rc_array.py | 126 +- ImageD11/refinegrains.py | 1049 +++++++-------- ImageD11/rotdex.py | 143 +-- ImageD11/rsv.py | 226 ++-- ImageD11/rsv_mapper.py | 501 ++++---- ImageD11/saintraw.py | 80 +- ImageD11/scale.py | 64 +- ImageD11/silxGui/silx_colfile.py | 145 +-- ImageD11/silxGui/silx_plot3d.py | 34 +- ImageD11/silxGui/silx_sptview.py | 84 +- ImageD11/simplex.py | 54 +- ImageD11/sinograms/__init__.py | 4 - ImageD11/sinograms/assemble_label.py | 143 ++- ImageD11/sinograms/dataset.py | 597 +++++---- ImageD11/sinograms/lima_segmenter.py | 289 +++-- ImageD11/sinograms/polefigures.py | 128 +- ImageD11/sinograms/pread.py | 125 +- ImageD11/sinograms/properties.py | 828 ++++++------ ImageD11/sinograms/roi_iradon.py | 161 +-- ImageD11/sinograms/sinogram2crysalis.py | 251 ++-- ImageD11/sparseframe.py | 603 ++++----- ImageD11/sym_u.py | 239 ++-- ImageD11/symops.py | 331 ++--- ImageD11/tkGui/guiindexer.py | 202 +-- ImageD11/tkGui/guimaker.py | 27 +- ImageD11/tkGui/guipeaksearch.py | 153 ++- ImageD11/tkGui/guisolver.py | 77 +- ImageD11/tkGui/guitransformer.py | 472 +++---- ImageD11/tkGui/listdialog.py | 66 +- ImageD11/tkGui/plot3d.py | 393 +++--- ImageD11/tkGui/twodplot.py | 337 ++--- ImageD11/transform.py | 657 +++++----- ImageD11/transformer.py | 670 ++++++---- ImageD11/unitcell.py | 640 +++++----- ImageD11/weighted_kde.py | 46 +- ImageD11/write_graindex_gv.py | 73 +- scripts/ImageD11_gui.py | 1 - scripts/filtergrain.py | 1 - scripts/fitgrain.py | 2 +- scripts/fix_spline.py | 4 +- scripts/id11_summarize.py | 1 - scripts/makemap.py | 4 +- scripts/merge_flt.py | 2 +- scripts/peaksearch.py | 2 - scripts/plotlayer.py | 3 +- scripts/spatialfix.py | 1 - 79 files changed, 9278 insertions(+), 7639 deletions(-) diff --git a/ImageD11/ImageD11_file_series.py b/ImageD11/ImageD11_file_series.py index dae6e3e8..3da69bcd 100644 --- a/ImageD11/ImageD11_file_series.py +++ b/ImageD11/ImageD11_file_series.py @@ -1,10 +1,4 @@ - from __future__ import print_function - -""" -To be moved to fabio sometime -""" - import fabio.file_series import fabio.fabioimage import fabio.openimage @@ -12,126 +6,202 @@ import gzip, bz2 from ImageD11 import ImageD11options 
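# [editor's note -- illustrative sketch, not part of the patch] The hunk below
# reformats get_options(), which only registers arguments on a caller-supplied
# argparse parser; callers construct the parser themselves. A minimal, hedged
# usage example (the flag values here are invented):
import argparse
from ImageD11.ImageD11_file_series import get_options

parser = get_options(argparse.ArgumentParser(description="image series demo"))
opts = parser.parse_args(
    ["-n", "data_", "-f", "1", "-l", "900", "-S", "0.25", "-T", "0.0"]
)
print(opts.stem, opts.first, opts.last, opts.OMEGA, opts.OMEGASTEP)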
+""" +To be moved to fabio sometime +""" + + def get_options(parser): - parser.add_argument("-5","--hdf5",action="store", type=str, - dest = "hdf5", default = None, - help = "hdf file containing input image series") + parser.add_argument( + "-5", + "--hdf5", + action="store", + type=str, + dest="hdf5", + default=None, + help="hdf file containing input image series", + ) # or, eventually: # stem, first, last, format, (omegas better be in the headers) - parser.add_argument("-n","--stem",action="store", type=str, - dest = "stem", default = None, - help = "stem name for input image series") - parser.add_argument("-f","--first",action="store", type=int, - dest = "first", default = None, - help = "first number for input image series") - parser.add_argument("-l","--last",action="store", type=int, - dest = "last", default = None, - help = "last number for input image series") - parser.add_argument("--ndigits", action="store", type=int, - dest = "ndigits", default = 4, - help = "Number of digits in file numbering [4]") - parser.add_argument("-P", "--padding", action="store", - choices=["Y","N"], - default="Y", dest="padding", - help="Is the image number to padded Y|N, e.g. "\ - "should 1 be 0001 or just 1 in image name, default=Y") - parser.add_argument("-F","--format",action="store", type=str, - dest = "format", default = ".edf", - help = "format [.edf] for input image series") - - parser.add_argument("-O", "--flood", action="store", - type=ImageD11options.ImageFileType(mode='r'), - dest = "flood", default = None, - help = "Flood") - - parser.add_argument("-d", "--dark", action="store", - dest = "dark", default = None, - type=ImageD11options.ImageFileType(mode='r'), - help = "Dark image") - parser.add_argument("-S", "--step", action="store", type=float, - dest = "OMEGASTEP", default = None, - help = "omega step size") - parser.add_argument("-T", "--start", action="store", type=float, - dest = "OMEGA", default = None, - help = "start omega") - parser.add_argument("--omega_motor", action="store", type=str, - dest = "omegamotor", default = "Omega", - help = "Header value to use for rotation motor position [Omega]") - parser.add_argument("--omega_motor_step", action="store", type=str, - dest = "omegamotorstep", default = "OmegaStep", - help = "Header value to use for rotation width [OmegaStep]") + parser.add_argument( + "-n", + "--stem", + action="store", + type=str, + dest="stem", + default=None, + help="stem name for input image series", + ) + parser.add_argument( + "-f", + "--first", + action="store", + type=int, + dest="first", + default=None, + help="first number for input image series", + ) + parser.add_argument( + "-l", + "--last", + action="store", + type=int, + dest="last", + default=None, + help="last number for input image series", + ) + parser.add_argument( + "--ndigits", + action="store", + type=int, + dest="ndigits", + default=4, + help="Number of digits in file numbering [4]", + ) + parser.add_argument( + "-P", + "--padding", + action="store", + choices=["Y", "N"], + default="Y", + dest="padding", + help="Is the image number to padded Y|N, e.g. 
" + "should 1 be 0001 or just 1 in image name, default=Y", + ) + parser.add_argument( + "-F", + "--format", + action="store", + type=str, + dest="format", + default=".edf", + help="format [.edf] for input image series", + ) + + parser.add_argument( + "-O", + "--flood", + action="store", + type=ImageD11options.ImageFileType(mode="r"), + dest="flood", + default=None, + help="Flood", + ) + + parser.add_argument( + "-d", + "--dark", + action="store", + dest="dark", + default=None, + type=ImageD11options.ImageFileType(mode="r"), + help="Dark image", + ) + parser.add_argument( + "-S", + "--step", + action="store", + type=float, + dest="OMEGASTEP", + default=None, + help="omega step size", + ) + parser.add_argument( + "-T", + "--start", + action="store", + type=float, + dest="OMEGA", + default=None, + help="start omega", + ) + parser.add_argument( + "--omega_motor", + action="store", + type=str, + dest="omegamotor", + default="Omega", + help="Header value to use for rotation motor position [Omega]", + ) + parser.add_argument( + "--omega_motor_step", + action="store", + type=str, + dest="omegamotorstep", + default="OmegaStep", + help="Header value to use for rotation width [OmegaStep]", + ) return parser -def get_series_from_hdf( hdf_file, dark = None, flood = None ): +def get_series_from_hdf(hdf_file, dark=None, flood=None): groups = hdf_file.listnames() for group in groups: imagenames = hdf_file[group].listnames() for image in imagenames: im = hdf_file[group][image] - om = float(im.attrs['Omega']) - data = im[:,:] + om = float(im.attrs["Omega"]) + data = im[:, :] if (dark, flood) != (None, None): data = data.astype(numpy.float32) if dark is not None: - numpy.subtract( data, dark, data ) + numpy.subtract(data, dark, data) if flood is not None: - numpy.divide( data, flood, data ) - yield fabio.fabioimage.fabioimage( data = data, - header = { - 'Omega': om } ) + numpy.divide(data, flood, data) + yield fabio.fabioimage.fabioimage(data=data, header={"Omega": om}) -def series_from_fabioseries( fabioseries, dark, flood, options ): + +def series_from_fabioseries(fabioseries, dark, flood, options): for filename in fabioseries: try: fim = fabio.openimage.openimage(filename) - except: - print("Missing image",filename) + except Exception: + print("Missing image", filename) continue if (dark, flood) != (None, None): fim.data = fim.data.astype(numpy.float32) if dark is not None: - numpy.subtract( fim.data, dark, fim.data ) + numpy.subtract(fim.data, dark, fim.data) if flood is not None: - numpy.divide( fim.data, flood, fim.data ) + numpy.divide(fim.data, flood, fim.data) if options.omegamotor in fim.header: - fim.header['Omega'] = float(fim.header[options.omegamotor]) + fim.header["Omega"] = float(fim.header[options.omegamotor]) try: - fim.header['OmegaStep'] = float(fim.header[options.omegamotorstep]) - except: - fim.header['OmegaStep'] = float(options.OMEGASTEP) + fim.header["OmegaStep"] = float(fim.header[options.omegamotorstep]) + except Exception: + fim.header["OmegaStep"] = float(options.OMEGASTEP) else: - fim.header['Omega'] = float(options.OMEGA) - fim.header['OmegaStep'] = float(options.OMEGASTEP) + fim.header["Omega"] = float(options.OMEGA) + fim.header["OmegaStep"] = float(options.OMEGASTEP) options.OMEGA = float(options.OMEGA) + float(options.OMEGASTEP) yield fim - -def get_series_from_stemnum( options, args, dark = None, flood = None ): +def get_series_from_stemnum(options, args, dark=None, flood=None): """ Returns a file series thing - not a fabio one """ - if options.format in ['bruker', 
'BRUKER', 'Bruker']: + if options.format in ["bruker", "BRUKER", "Bruker"]: extn = "" - elif options.format == 'GE': + elif options.format == "GE": extn = "" else: extn = options.format - + fso = fabio.file_series.numbered_file_series( options.stem, options.first, options.last, extn, - digits = options.ndigits, - padding = options.padding ) - return series_from_fabioseries( fso , dark, flood, options ) - + digits=options.ndigits, + padding=options.padding, + ) + return series_from_fabioseries(fso, dark, flood, options) -def get_series_from_options( options, args ): + +def get_series_from_options(options, args): """ Returns a file series thing - not a fabio one @@ -141,36 +211,33 @@ def get_series_from_options( options, args ): try: if options.dark is not None: - dark = fabio.openimage.openimage( options.dark ).data + dark = fabio.openimage.openimage(options.dark).data else: dark = None - except: - print("Problem with your dark",options.dark) + except Exception: + print("Problem with your dark", options.dark) raise - + try: if options.flood is not None: - flood = fabio.openimage.openimage( options.flood ).data + flood = fabio.openimage.openimage(options.flood).data else: flood = None - except: - print("Problem with your flood",options.flood) + except Exception: + print("Problem with your flood", options.flood) raise - - if len(args) > 0 : - # We assume unlabelled arguments are filenames + if len(args) > 0: + # We assume unlabelled arguments are filenames fso = fabio.file_series.file_series(args) - return series_from_fabioseries( fso, dark, flood, options ) + return series_from_fabioseries(fso, dark, flood, options) if options.hdf5 is not None: hf = h5py.File(options.hdf5) # print "Getting images from",options.hdf5 - return get_series_from_hdf( hf, dark, flood ) - - return get_series_from_stemnum( options, args, - dark, flood) - + return get_series_from_hdf(hf, dark, flood) + + return get_series_from_stemnum(options, args, dark, flood) def getedfheader(filename): @@ -181,28 +248,29 @@ def getedfheader(filename): Adds a filename key at the top """ h = "filename = " - if filename[-3:]==".gz": - fp=gzip.GzipFile(filename,"rb") - elif filename [-4:]==".bz2": - fp=bz2.BZ2File(filename,"rb") + if filename[-3:] == ".gz": + fp = gzip.GzipFile(filename, "rb") + elif filename[-4:] == ".bz2": + fp = bz2.BZ2File(filename, "rb") else: try: - fp=open(filename,"rb") + fp = open(filename, "rb") except IOError: return "" - h=h+filename+";\n" - s=fp.read(1024) - if s.find("{")==-1: + h = h + filename + ";\n" + s = fp.read(1024) + if s.find("{") == -1: raise Exception("Not an edf file") while 1: - if s.find("}")>=0: - h=h+s[0:s.find("}")+2] + if s.find("}") >= 0: + h = h + s[0 : s.find("}") + 2] break else: - h=h+s - s=fp.read(1024) + h = h + s + s = fp.read(1024) return h + def motor_mne(hd): """ expands the _mne and _pos header items of edf headers @@ -211,20 +279,20 @@ def motor_mne(hd): order = [] for line in hd.split(";"): try: - key,vals = line.split("=") + key, vals = line.split("=") except ValueError: continue key = key.lstrip().rstrip() h[key] = vals.split(";")[0] - order.append( key ) + order.append(key) for k in order: if k.endswith("_mne"): stem = k.split("_")[0] - p = k.replace("_mne","_pos") + p = k.replace("_mne", "_pos") newkeys = h[k].split() newvals = h[p].split() for ik, iv in zip(newkeys, newvals): - kk = stem+":"+ik - h[kk]=iv - order.append( kk ) + kk = stem + ":" + ik + h[kk] = iv + order.append(kk) return h, order diff --git a/ImageD11/ImageD11_thread.py b/ImageD11/ImageD11_thread.py 
index a0482f10..c9d0d68b 100644 --- a/ImageD11/ImageD11_thread.py +++ b/ImageD11/ImageD11_thread.py @@ -1,5 +1,3 @@ - - from __future__ import print_function # ImageD11_v1.0 Software for beamline ID11 @@ -20,20 +18,24 @@ # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 0211-1307 USA try: - import Queue + pass except: # python 3? - import queue as Queue - + pass + import threading + # global stop_now stop_now = False + class ImageD11_thread(threading.Thread): - """ Add a stopping mechanism for unhandled exceptions """ + """Add a stopping mechanism for unhandled exceptions""" + def __init__(self, myname="ImageD11_thread"): - self.myname=myname + self.myname = myname threading.Thread.__init__(self) + def run(self): global stop_now try: @@ -41,8 +43,9 @@ def run(self): except: stop_now = True raise + def ImageD11_stop_now(self): global stop_now if stop_now: - print( "Got a stop in",self.myname) + print("Got a stop in", self.myname) return stop_now diff --git a/ImageD11/ImageD11options.py b/ImageD11/ImageD11options.py index 261cda66..25eeb744 100644 --- a/ImageD11/ImageD11options.py +++ b/ImageD11/ImageD11options.py @@ -1,4 +1,3 @@ - import os, logging # Try to manage the options coming and going to the different scripts @@ -8,42 +7,49 @@ # # Previously we had "optparse", this becomes "argparse" for later pythons # -# The idea is to use something like gooey or argparseui which hook to +# The idea is to use something like gooey or argparseui which hook to # the parsers _actions list to see what can be offered. # -# We define a few "types" corresponding to ImageD11 known filetypes to +# We define a few "types" corresponding to ImageD11 known filetypes to # help when identifying what an optiong is so we could hook up plotting # and editing to the different entities + class FileType(object): - def __init__(self, mode='r'): - assert mode in 'rw' + def __init__(self, mode="r"): + assert mode in "rw" self._mode = mode + def __call__(self, string): - if 'r' in self._mode: - if not os.path.exists( string ): + if "r" in self._mode: + if not os.path.exists(string): logging.error("File %s not found", string) return string + class ParameterFileType(FileType): pass + class ColumnFileType(FileType): pass + class UbiFileType(FileType): pass + class ImageFileType(FileType): pass + class SplineFileType(FileType): pass + class GvectorFileType(FileType): pass + class HdfFileType(FileType): pass - - diff --git a/ImageD11/__init__.py b/ImageD11/__init__.py index 2f8f74d8..bbb8f73e 100644 --- a/ImageD11/__init__.py +++ b/ImageD11/__init__.py @@ -17,10 +17,7 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA """ - -__version__ = "2.0.0" -__author__ = 'Jon Wright', -__author_email__ = 'wright@esrf.fr' - - +__version__ = "2.0.0" +__author__ = ("Jon Wright",) +__author_email__ = "wright@esrf.fr" diff --git a/ImageD11/blobcorrector.py b/ImageD11/blobcorrector.py index 387f3b1c..9a4e071b 100644 --- a/ImageD11/blobcorrector.py +++ b/ImageD11/blobcorrector.py @@ -1,6 +1,6 @@ from __future__ import print_function - + # ImageD11 Software for beamline ID11 # Copyright (C) 2021 Jon Wright # @@ -32,6 +32,7 @@ import fabio from scipy.interpolate import bisplev + def readfit2dfloats(filep, nfl): """ Interprets a 5E14.7 formatted fortran line @@ -46,17 +47,19 @@ def readfit2dfloats(filep, nfl): # logging.debug("readfit2dfloats:"+thisline) while i < 5 * 14: # logging.debug(str(i)+ thisline[i:i+14]) - ret.append(float(thisline[i:i+14]) ) + 
ret.append(float(thisline[i : i + 14])) j = j + 1 i = i + 14 - if j == nfl: + if j == nfl: break return ret -class correctorclass: #IGNORE:R0902 + +class correctorclass: # IGNORE:R0902 """ Applies a spatial distortion to a peak position using a fit2d splinefile """ + def __init__(self, argsplinefile, orientation="edf"): """ Argument is the name of a fit2d spline file @@ -73,8 +76,6 @@ def __init__(self, argsplinefile, orientation="edf"): self.tck2 = None if self.splinefile is not None: self.readfit2dspline(self.splinefile) - - def correct(self, xin, yin): """ @@ -85,13 +86,15 @@ def correct(self, xin, yin): if self.orientation == "edf": xcor = xin + bisplev(yin, xin, self.tck2) ycor = yin + bisplev(yin, xin, self.tck1) - else: - # fit2d does a flip - raise Exception("Spline orientations must be edf, convert " - "your image to edf and remake the spline") + else: + # fit2d does a flip + raise Exception( + "Spline orientations must be edf, convert " + "your image to edf and remake the spline" + ) # Unreachable code - we no longer accept this complexity # it means the spline file for ImageD11 bruker images - # is not the same as for fit2d. + # is not the same as for fit2d. # xpos = self.xmax - xin # xcor = xin - bisplev(yin, xpos, self.tck2) # ycor = yin + bisplev(yin, xpos, self.tck1) @@ -102,7 +105,7 @@ def make_pixel_lut(self, dims): Generate an x and y image which maps the array indices into floating point array indices (to be corrected for pixel size later) - returns + returns FIXME - check they are the right way around add some sort of known splinefile testcase """ @@ -111,17 +114,17 @@ def make_pixel_lut(self, dims): x_im = numpy.outer(numpy.arange(dims[0]), numpy.ones(dims[1])) y_im = numpy.outer(numpy.ones(dims[0]), numpy.arange(dims[1])) # xcor is tck2 - x_im = numpy.add( x_im, - bisplev( numpy.arange(dims[1]), - numpy.arange(dims[0]), - self.tck2 ).T, - x_im) + x_im = numpy.add( + x_im, + bisplev(numpy.arange(dims[1]), numpy.arange(dims[0]), self.tck2).T, + x_im, + ) # ycor is tck1 - y_im = numpy.add( y_im, - bisplev( numpy.arange(dims[1]), - numpy.arange(dims[0]), - self.tck1 ).T, - y_im) + y_im = numpy.add( + y_im, + bisplev(numpy.arange(dims[1]), numpy.arange(dims[0]), self.tck1).T, + y_im, + ) self.pixel_lut = x_im, y_im return self.pixel_lut @@ -133,9 +136,11 @@ def make_pos_lut(self, dims): """ if self.pos_lut is None: if self.pixel_lut is None: - self.make_pixel_lut(dims) - self.pos_lut = ( self.pixel_lut[0] * self.xsize, - self.pixel_lut[1] * self.ysize ) + self.make_pixel_lut(dims) + self.pos_lut = ( + self.pixel_lut[0] * self.xsize, + self.pixel_lut[1] * self.ysize, + ) return self.pos_lut def distort(self, xin, yin): @@ -151,8 +156,7 @@ def distort(self, xin, yin): ytmp = yin - bisplev(yold, xold, self.tck1) xtmp = xin - bisplev(yold, xold, self.tck2) # Second guess should be better - error = math.sqrt((xtmp - xold) * (xtmp - xold) + - (ytmp - yold) * (ytmp - yold) ) + error = math.sqrt((xtmp - xold) * (xtmp - xold) + (ytmp - yold) * (ytmp - yold)) ntries = 0 while error > self.tolerance: ntries = ntries + 1 @@ -160,8 +164,9 @@ def distort(self, xin, yin): yold = ytmp ytmp = yin - bisplev(yold, xold, self.tck1) xtmp = xin - bisplev(yold, xold, self.tck2) - error = math.sqrt((xtmp - xold) * (xtmp - xold) + - (ytmp - yold) * (ytmp - yold) ) + error = math.sqrt( + (xtmp - xold) * (xtmp - xold) + (ytmp - yold) * (ytmp - yold) + ) # print error,xold,x,yold,y if ntries == 10: raise Exception("Error getting the inverse spline to converge") @@ -174,18 +179,13 @@ def test(self, 
xin, yin): """ xtes, ytes = self.correct(xin, yin) xold, yold = self.distort(xtes, ytes) - error = math.sqrt( (xin - xold) * (xin - xold) + - (yin - yold) * (yin - yold)) + error = math.sqrt((xin - xold) * (xin - xold) + (yin - yold) * (yin - yold)) if error > self.tolerance: logging.error("Blobcorrector Test Failed!") raise Exception("Problem in correctorclass") - - - - - # read the fit2d array into a tck tuple + def readfit2dspline(self, name): """ Reads a fit2d spline file into a scipy/fitpack tuple, tck @@ -193,59 +193,55 @@ def readfit2dspline(self, name): """ fin = open(name, "r") # SPATIAL DISTORTION SPLINE INTERPOLATION COEFFICIENTS - myline = fin.readline() + myline = fin.readline() if myline[:7] != "SPATIAL": - raise SyntaxError(name + \ - ": file does not seem to be a fit2d spline file") - fin.readline() # BLANK LINE - fin.readline() # VALID REGION - myline = fin.readline() # the actual valid region, - # assuming xmin,ymin,xmax,ymax - logging.debug("xmin,ymin,xmax,ymax, read: "+myline) - self.xmin, self.ymin, self.xmax, self.ymax = \ - [float(z) for z in myline.split()] - myline = fin.readline() # BLANK - myline = fin.readline() # GRID SPACING, X-PIXEL SIZE, Y-PIXEL SIZE + raise SyntaxError(name + ": file does not seem to be a fit2d spline file") + fin.readline() # BLANK LINE + fin.readline() # VALID REGION + myline = fin.readline() # the actual valid region, + # assuming xmin,ymin,xmax,ymax + logging.debug("xmin,ymin,xmax,ymax, read: " + myline) + self.xmin, self.ymin, self.xmax, self.ymax = [float(z) for z in myline.split()] + myline = fin.readline() # BLANK + myline = fin.readline() # GRID SPACING, X-PIXEL SIZE, Y-PIXEL SIZE myline = fin.readline() - logging.debug("gridspace, xsize, ysize: "+myline) - self.gridspacing, self.xsize, self.ysize = \ - [float(z) for z in myline.split()] - fin.readline() # BLANK - fin.readline() # X-DISTORTION - myline = fin.readline() # two integers nx1,ny1 - logging.debug("nx1, ny1 read: "+myline) + logging.debug("gridspace, xsize, ysize: " + myline) + self.gridspacing, self.xsize, self.ysize = [float(z) for z in myline.split()] + fin.readline() # BLANK + fin.readline() # X-DISTORTION + myline = fin.readline() # two integers nx1,ny1 + logging.debug("nx1, ny1 read: " + myline) nx1, ny1 = [int(z) for z in myline.split()] # Now follow fit2d formatted line 5E14.7 tx1 = numpy.array(readfit2dfloats(fin, nx1), numpy.float32) ty1 = numpy.array(readfit2dfloats(fin, ny1), numpy.float32) - cf1 = numpy.array(readfit2dfloats(fin, (nx1 - 4) * (ny1 - 4)), - numpy.float32) - fin.readline() #BLANK - fin.readline() # Y-DISTORTION - myline = fin.readline() # two integers nx2, ny2 - nx2 , ny2 = [int(z) for z in myline.split()] + cf1 = numpy.array(readfit2dfloats(fin, (nx1 - 4) * (ny1 - 4)), numpy.float32) + fin.readline() # BLANK + fin.readline() # Y-DISTORTION + myline = fin.readline() # two integers nx2, ny2 + nx2, ny2 = [int(z) for z in myline.split()] tx2 = numpy.array(readfit2dfloats(fin, nx2), numpy.float32) ty2 = numpy.array(readfit2dfloats(fin, ny2), numpy.float32) - cf2 = numpy.array(readfit2dfloats(fin, (nx2 - 4) * (ny2 - 4)), - numpy.float32) + cf2 = numpy.array(readfit2dfloats(fin, (nx2 - 4) * (ny2 - 4)), numpy.float32) fin.close() # The 3 ,3 is the number of knots self.tck1 = (tx1, ty1, cf1, 3, 3) self.tck2 = (tx2, ty2, cf2, 3, 3) - - class perfect(correctorclass): """ To use on previously corrected when there is no splinefile Allows pixel size etc to be set """ + splinefile = "NO_CORRECTION_APPLIED" xsize = "UNKNOWN" ysize = "UNKNOWN" + def 
__init__(self): correctorclass.__init__(self, None) + def correct(self, xin, yin): """ Do nothing - just return the same values @@ -257,7 +253,7 @@ def make_pixel_lut(self, dims): Generate an x and y image which maps the array indices into floating point array indices (to be corrected for pixel size later) - returns + returns FIXME - check they are the right way around add some sort of known splinefile testcase """ @@ -269,66 +265,67 @@ def make_pixel_lut(self, dims): return self.pixel_lut - class eiger_spatial(object): - - def __init__(self, - dxfile="/data/id11/nanoscope/Eiger/spatial_20210415_JW/e2dx.edf", - dyfile="/data/id11/nanoscope/Eiger/spatial_20210415_JW/e2dy.edf",): + def __init__( + self, + dxfile="/data/id11/nanoscope/Eiger/spatial_20210415_JW/e2dx.edf", + dyfile="/data/id11/nanoscope/Eiger/spatial_20210415_JW/e2dy.edf", + ): self.dx = fabio.open(dxfile).data # x == fast direction at ID11 self.dy = fabio.open(dyfile).data # y == slow direction assert self.dx.shape == self.dy.shape - + def __call__(self, pks): - si = numpy.round(pks['s_raw']).astype(int) - fi = numpy.round(pks['f_raw']).astype(int) - pks['fc'] = self.dx[ si, fi ] + pks['f_raw'] - pks['sc'] = self.dy[ si, fi ] + pks['s_raw'] + si = numpy.round(pks["s_raw"]).astype(int) + fi = numpy.round(pks["f_raw"]).astype(int) + pks["fc"] = self.dx[si, fi] + pks["f_raw"] + pks["sc"] = self.dy[si, fi] + pks["s_raw"] return pks - + def pixel_lut(self): - """ returns (slow, fast) pixel postions of an image """ + """returns (slow, fast) pixel postions of an image""" s = self.dx.shape - i, j = numpy.mgrid[ 0:s[0], 0:s[1] ] + i, j = numpy.mgrid[0 : s[0], 0 : s[1]] return self.dy + j, self.dx + i - + + # -#""" -#http://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/OWENS/LECT5/node5.html +# """ +# http://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/OWENS/LECT5/node5.html # -#Various interpolation schemes can be used. +# Various interpolation schemes can be used. # A common one is bilinear interpolation, given by # -#v(x,y) = c1x + c2y + c3xy + c4, +# v(x,y) = c1x + c2y + c3xy + c4, # -#where v(x,y) is the grey value at position (x,y). -#Thus we have four coefficients to solve for. We use the known grey values +# where v(x,y) is the grey value at position (x,y). +# Thus we have four coefficients to solve for. We use the known grey values # of the 4 pixels -#surrounding the `come from' location to solve for the coefficients. +# surrounding the `come from' location to solve for the coefficients. # -#We need to solve the equation +# We need to solve the equation # -#v1 ( x1 y1 x1y1 1 ) c1 -#v2 = ( x2 y2 x2y2 1 ) c2 -#v3 ( x3 y3 x3y3 1 ) c3 -#v4 ( x4 y4 x4y4 1 ) c4 +# v1 ( x1 y1 x1y1 1 ) c1 +# v2 = ( x2 y2 x2y2 1 ) c2 +# v3 ( x3 y3 x3y3 1 ) c3 +# v4 ( x4 y4 x4y4 1 ) c4 # # -#or, in short, -#[V] = [M][C], +# or, in short, +# [V] = [M][C], # -#which implies -#[C] = [M]-1[V]. +# which implies +# [C] = [M]-1[V]. # -# This has to be done for every pixel location in the output image and +# This has to be done for every pixel location in the output image and # is thus a lot of computation! -# Alternatively one could simply use the integer pixel position closest +# Alternatively one could simply use the integer pixel position closest # to the `come from location'. # This is adequate for most cases. # -#""" +# """ -#def unwarpimage(image, xpositions, ypositions): +# def unwarpimage(image, xpositions, ypositions): # """ # xpositions/ypositions are floats giving pixel co-ords of the input image. 
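# [editor's note -- illustrative sketch, not part of the patch] The commented
# notes above describe bilinear interpolation as solving
# v(x, y) = c1*x + c2*y + c3*x*y + c4 from the four pixels around the
# "come from" position, i.e. [V] = [M][C] so [C] = inv(M)[V]. A hedged NumPy
# rendering of that algebra, with made-up corner values:
import numpy as np

def bilinear_coeffs(corners, values):
    """Solve for (c1, c2, c3, c4) from 4 (x, y) corners and their grey values."""
    x, y = np.asarray(corners, dtype=float).T
    M = np.column_stack([x, y, x * y, np.ones(4)])  # one (x, y, x*y, 1) row per corner
    return np.linalg.solve(M, np.asarray(values, dtype=float))

c1, c2, c3, c4 = bilinear_coeffs([(0, 0), (1, 0), (0, 1), (1, 1)], [0.0, 1.0, 2.0, 3.0])
x, y = 0.5, 0.5
print(c1 * x + c2 * y + c3 * x * y + c4)  # 1.5 at the centre of the unit square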
# @@ -338,4 +335,3 @@ def pixel_lut(self): # Hence, for now, # """ # pass - diff --git a/ImageD11/cImageD11.py b/ImageD11/cImageD11.py index cd3709e1..655be6fe 100644 --- a/ImageD11/cImageD11.py +++ b/ImageD11/cImageD11.py @@ -19,41 +19,49 @@ # Check for the use of openmp interactions with os.fork and multiprocessing + def check_multiprocessing(): - """ You cannot safely use os.fork together with threads. + """You cannot safely use os.fork together with threads. But the cImageD11 codes uses threads via openmp. So please use forkserver or spawn for multiprocessing. - + https://discuss.python.org/t/concerns-regarding-deprecation-of-fork-with-alive-threads/33555 https://github.com/FABLE-3DXRD/ImageD11/issues/177 - + > Developers should respond by adjusting their use of multiprocessing or concurrent.futures > to explicitly specify either the "forkserver" or "spawn" start methods via a context. """ import multiprocessing + # Problem cases are: # child processes -> we will set num threads to 1 parent = None - if hasattr(multiprocessing,"parent_process"): + if hasattr(multiprocessing, "parent_process"): parent = multiprocessing.parent_process() # only for python 3.8 and up - cimaged11_omp_set_num_threads( 1 ) + cimaged11_omp_set_num_threads(1) # people wanting Nprocs * Mthreads need to reset after import # OMP_NUM_THREADS is not going to work for them # how are we going to remember this in the future?? # # now check for the fork issue - if ((multiprocessing.get_start_method(allow_none=False) == 'fork') and # we have the problem - (multiprocessing.get_start_method(allow_none=True) is None) and # by accident - ('forkserver' in multiprocessing.get_all_start_methods())): # so fix it - multiprocessing.set_start_method('forkserver') - if ((multiprocessing.get_start_method(allow_none=False) == 'fork') and # we have the problem - (parent is not None)): + if ( + (multiprocessing.get_start_method(allow_none=False) == "fork") + and ( # we have the problem + multiprocessing.get_start_method(allow_none=True) is None + ) + and ("forkserver" in multiprocessing.get_all_start_methods()) # by accident + ): # so fix it + multiprocessing.set_start_method("forkserver") + if ( + multiprocessing.get_start_method(allow_none=False) == "fork" + ) and ( # we have the problem + parent is not None + ): # Tell them about it. 
warnings.warn(__doc__) - if cimaged11_omp_get_max_threads() == 0: # The code was compiled without openmp OPENMP = False @@ -62,7 +70,7 @@ def check_multiprocessing(): OPENMP = True check_multiprocessing() - + # For 32 or 64 bits nbyte = struct.calcsize("P") # 4 or 8 diff --git a/ImageD11/cImageD11_docstrings.py b/ImageD11/cImageD11_docstrings.py index cb0b0d5d..ef17a594 100644 --- a/ImageD11/cImageD11_docstrings.py +++ b/ImageD11/cImageD11_docstrings.py @@ -1,4 +1,3 @@ - """Autogenerated from make_pyf.py Edit in _cImageD11.pyf please""" array_histogram = """computes the histogram for an image Go through the data to compute a histogram of the values @@ -343,4 +342,5 @@ "splat", "tosparse_u16", "uint16_to_float_darkflm", - "uint16_to_float_darksub"] \ No newline at end of file + "uint16_to_float_darksub", +] diff --git a/ImageD11/columnfile.py b/ImageD11/columnfile.py index dd1fcbf7..10e8bb39 100644 --- a/ImageD11/columnfile.py +++ b/ImageD11/columnfile.py @@ -1,5 +1,3 @@ - - from __future__ import print_function @@ -28,7 +26,6 @@ # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 0211-1307 USA -import warnings from ImageD11 import parameters, transform import numpy as np @@ -54,14 +51,20 @@ "Max_o", "dety", "detz", - "gx", "gy", "gz", - "hr", "kr", "zr", - "xl", "yl", "zl", + "gx", + "gy", + "gz", + "hr", + "kr", + "zr", + "xl", + "yl", + "zl", "drlv2", "tth", "eta", - "tth_hist_prob" - ] + "tth_hist_prob", +] INTS = [ "Number_of_pixels", @@ -72,45 +75,49 @@ "Min_s", "Max_s", "spot3d_id", - "h", "k", "l", - "onfirst", "onlast", "labels", + "h", + "k", + "l", + "onfirst", + "onlast", + "labels", "labels", "Grain", "grainno", "grain_id", "IKEY", "npk2d", - ] +] # 9 elements -ij = ["%d%d"%(i,j) for i in range(1,4) for j in range(1,4)] +ij = ["%d%d" % (i, j) for i in range(1, 4) for j in range(1, 4)] # Uij, UBIij -LONGFLOATS = [s+v for v in ij for s in ["U","UBI"]] - - - +LONGFLOATS = [s + v for v in ij for s in ["U", "UBI"]] # symmetric 6 elements -ijs = [11,22,33,23,13,12] +ijs = [11, 22, 33, 23, 13, 12] # 'eps'+ij+'_s' -EXPONENTIALS = [ h+str(v)+t for v in ijs for h,t in [ ('eps',''), - ('eps','_s'), - ('sig',''), - ('sig','_s') ] ] +EXPONENTIALS = [ + h + str(v) + t + for v in ijs + for h, t in [("eps", ""), ("eps", "_s"), ("sig", ""), ("sig", "_s")] +] # 'e' ij1 'e' ij2 '_s' for triangle ij -EXPONENTIALS += ["%s%d%s%d%s"%(h,ijs[i],h,ijs[j],t) - for i in range(6) - for j in range(i,6) - for h,t in [ ('e',''),('e','_s'),('s',''),('s','_s')] ] +EXPONENTIALS += [ + "%s%d%s%d%s" % (h, ijs[i], h, ijs[j], t) + for i in range(6) + for j in range(i, 6) + for h, t in [("e", ""), ("e", "_s"), ("s", ""), ("s", "_s")] +] # testing for line compression -#from ImageD11.columnfile import LONGFLOATS as oldf -#from ImageD11.columnfile import EXPONENTIALS as olde -#assert set(oldf) == set(LONGFLOATS) -#assert set(olde) == set(EXPONENTIALS) -#print "These seem to match" +# from ImageD11.columnfile import LONGFLOATS as oldf +# from ImageD11.columnfile import EXPONENTIALS as olde +# assert set(oldf) == set(LONGFLOATS) +# assert set(olde) == set(EXPONENTIALS) +# print "These seem to match" FORMATS = {} @@ -126,22 +133,24 @@ for f in EXPONENTIALS: FORMATS[f] = "%.4e" + def clean(str_lst): - """ trim whitespace from titles """ + """trim whitespace from titles""" return [s.lstrip().rstrip() for s in str_lst] def fillcols(lines, cols): - for i,line in enumerate(lines): - for j,item in enumerate(line.split()): + for i, line in enumerate(lines): + 
for j, item in enumerate(line.split()): cols[j][i] = float(item) + class columnfile(object): """ Class to represent an ascii file containing multiple named columns """ - def __init__(self, filename = None, new = False): + def __init__(self, filename=None, new=False): self.filename = filename self.__data = [] self.titles = [] @@ -157,18 +166,18 @@ def __init__(self, filename = None, new = False): def get_bigarray(self): # if someone uses this we have to go back to the old # representation - if not hasattr(self,"__bigarray") or len(self.__data) != len(self.__bigarray): - self.__bigarray = np.asarray( self.__data ) + if not hasattr(self, "__bigarray") or len(self.__data) != len(self.__bigarray): + self.__bigarray = np.asarray(self.__data) self.__data = self.__bigarray return self.__bigarray def set_bigarray(self, ar): -# print("setting bigarray",len(ar),len(ar[0])) -# warnings.filter("once") -# warnings.warn("Setting bigarray on colfile", stacklevel=2) - assert len(ar) == len(self.titles), \ - "Wrong length %d to set bigarray"%(len(ar))+\ - " ".join(self.titles) + # print("setting bigarray",len(ar),len(ar[0])) + # warnings.filter("once") + # warnings.warn("Setting bigarray on colfile", stacklevel=2) + assert len(ar) == len(self.titles), "Wrong length %d to set bigarray" % ( + len(ar) + ) + " ".join(self.titles) nrows = len(ar[0]) for col in ar: assert len(col) == nrows, "ar is not rectangular" @@ -188,48 +197,54 @@ def set_attributes(self): # use empty arrays for now... # not sure why this was avoided in the past? pass - #return + # return for i, name in enumerate(self.titles): setattr(self, name, self.__data[i]) - a = getattr(self, name) - assert len(a) == self.nrows, "%s %d %d"%(name,len(a),self.nrows) + a = getattr(self, name) + assert len(a) == self.nrows, "%s %d %d" % (name, len(a), self.nrows) def __getitem__(self, key): if key in self.titles: - return self.getcolumn( key ) + return self.getcolumn(key) else: raise KeyError def keys(self): return self.titles - def removerows( self, column_name, values, tol = 0 ): + def removerows(self, column_name, values, tol=0): """ removes rows where self.column_name == values values is a list of values to remove column name should be in self.titles tol is for floating point (fuzzy) comparisons versus integer """ - col = self.getcolumn( column_name ) - if tol <= 0: # integer comparisons - col = col.astype( np.int ) - mskfun = lambda x, val, t: x == val - else: # floating point - mskfun = lambda x, val, t: np.abs( x - val ) < t - mask = mskfun( col, values[0], tol ) + col = self.getcolumn(column_name) + if tol <= 0: # integer comparisons + col = col.astype(np.int) + + def mskfun(x, val, t): + return x == val + + else: # floating point + + def mskfun(x, val, t): + return np.abs(x - val) <= t + + mask = mskfun(col, values[0], tol) for val in values[1:]: - np.logical_or( mskfun( col, val, tol ), mask, mask) - self.filter( ~mask ) + np.logical_or(mskfun(col, val, tol), mask, mask) + self.filter(~mask) - def sortby( self, name ): + def sortby(self, name): """ Sort arrays according to column named "name" """ - col = self.getcolumn( name ) - order = np.argsort( col ) - self.reorder( order ) + col = self.getcolumn(name) + order = np.argsort(col) + self.reorder(order) - def reorder( self, indices ): + def reorder(self, indices): """ Put array into the order given by indices ... 
normally indices would come from np.argsort of something @@ -243,18 +258,18 @@ def writefile(self, filename): write an ascii columned file """ self.chkarray() - fout = open(filename,"w") # appending + fout = open(filename, "w") # appending # Write as "# name = value\n" parnames = list(self.parameters.get_parameters().keys()) parnames.sort() for p in parnames: - fout.write("# %s = %s\n"%(p, str(self.parameters.get(p) ) ) ) + fout.write("# %s = %s\n" % (p, str(self.parameters.get(p)))) # self.parameters.saveparameters(filename) # Now titles line fout.write("#") format_str = "" for title in self.titles: - fout.write(" %s"%(title)) + fout.write(" %s" % (title)) try: format_str += " %s" % (FORMATS[title]) except KeyError: @@ -262,7 +277,7 @@ def writefile(self, filename): fout.write("\n") format_str += "\n" for i in range(self.nrows): - fout.write(format_str % tuple( [col[i] for col in self.__data] ) ) + fout.write(format_str % tuple([col[i] for col in self.__data])) fout.close() def readfile(self, filename): @@ -275,52 +290,50 @@ def readfile(self, filename): self.nrows = 0 i = 0 # Check if this is a hdf file: magic number - with open(filename,"rb") as f: + with open(filename, "rb") as f: magic = f.read(4) # 1 2 3 4 bytes - if magic == b'\x89HDF': + if magic == b"\x89HDF": print("Reading your columnfile in hdf format") - colfile_from_hdf( filename, obj = self ) + colfile_from_hdf(filename, obj=self) return - with open(filename,"r") as f: + with open(filename, "r") as f: raw = f.readlines() header = True while header and i < len(raw): - if len(raw[i].lstrip())==0: - # skip blank lines - i += 1 - continue - if raw[i][0] == "#": - # title line - if raw[i].find("=") > -1: - # key = value line - name, value = clean(raw[i][1:].split("=",1)) - self.parameters.addpar( - parameters.par( name, value ) ) - else: - self.titles = raw[i][1:].split() - i += 1 - else: - header = False + if len(raw[i].lstrip()) == 0: + # skip blank lines + i += 1 + continue + if raw[i][0] == "#": + # title line + if raw[i].find("=") > -1: + # key = value line + name, value = clean(raw[i][1:].split("=", 1)) + self.parameters.addpar(parameters.par(name, value)) + else: + self.titles = raw[i][1:].split() + i += 1 + else: + header = False try: - row0 = [ float( v ) for v in raw[i].split() ] - lastrow = [ float( v ) for v in raw[-1].split() ] - if len(row0) == len(lastrow ): - nrows = len(raw)-i + row0 = [float(v) for v in raw[i].split()] + lastrow = [float(v) for v in raw[-1].split()] + if len(row0) == len(lastrow): + nrows = len(raw) - i last = len(raw) else: - nrows = len(raw)-i-1 # skip the last row - last = len(raw)-1 - cols = [ np.empty( nrows , float ) for _ in range(len(row0))] - fillcols( raw[i:last], cols ) - self.__data=cols + nrows = len(raw) - i - 1 # skip the last row + last = len(raw) - 1 + cols = [np.empty(nrows, float) for _ in range(len(row0))] + fillcols(raw[i:last], cols) + self.__data = cols except: - raise # Exception("Problem interpreting your colfile") + raise # Exception("Problem interpreting your colfile") self.ncols, self.nrows = len(row0), nrows self.parameters.dumbtypecheck() self.set_attributes() - def filter(self, mask): """ mask is an nrows long array of true/false @@ -328,7 +341,7 @@ def filter(self, mask): self.chkarray() if len(mask) != self.nrows: raise Exception("Mask is the wrong size") - msk = np.array( mask, dtype=bool ) + msk = np.array(mask, dtype=bool) # back to list here self.__data = [col[msk] for col in self.__data] self.nrows = len(self.__data[0]) @@ -338,10 +351,10 @@ def copy(self): 
""" Returns a (deep) copy of the columnfile """ - cnw = columnfile(self.filename, new = True) + cnw = columnfile(self.filename, new=True) self.chkarray() - cnw.titles = [t for t in self.titles ] - cnw.parameters = parameters.parameters( **self.parameters.parameters ) + cnw.titles = [t for t in self.titles] + cnw.parameters = parameters.parameters(**self.parameters.parameters) cnw.bigarray = [col.copy() for col in self.__data] cnw.ncols = self.ncols cnw.set_attributes() @@ -352,12 +365,12 @@ def copyrows(self, rows): Returns a copy of select rows of the columnfile """ self.chkarray() - cnw = columnfile(self.filename, new = True) - cnw.titles = [t for t in self.titles ] + cnw = columnfile(self.filename, new=True) + cnw.titles = [t for t in self.titles] cnw.parameters = self.parameters cnw.bigarray = [col[rows] for col in self.__data] - #cnw.ncols, cnw.nrows = cnw.bigarray.shape - #cnw.set_attributes() + # cnw.ncols, cnw.nrows = cnw.bigarray.shape + # cnw.set_attributes() return cnw def chkarray(self): @@ -382,13 +395,13 @@ def addcolumn(self, col, name): # raise Exception("Already got a column called "+name) else: # assert self.bigarray.shape == (self.ncols, self.nrows) - data = np.asanyarray( col ) - assert data.shape[0] == self.nrows + data = np.asanyarray(col) + assert data.shape[0] == self.nrows self.titles.append(name) - idx = len(self.titles)-1 + idx = len(self.titles) - 1 self.ncols += 1 - self.__data.append( data ) - setattr(self, name, self.__data[idx] ) + self.__data.append(data) + setattr(self, name, self.__data[idx]) # Not obvious, but might be a useful alias setcolumn = addcolumn @@ -399,16 +412,16 @@ def getcolumn(self, name): """ if name in self.titles: return self.__data[self.titles.index(name)] - raise KeyError("Name "+name+" not in file") + raise KeyError("Name " + name + " not in file") - def setparameters( self, pars ): + def setparameters(self, pars): """ update the parameters """ self.parameters = pars self.parameters.dumbtypecheck() - def updateGeometry(self, pars=None ): + def updateGeometry(self, pars=None): """ changing or not the parameters it (re)-computes: xl,yl,zl = ImageD11.transform.compute_xyz_lab @@ -416,7 +429,7 @@ def updateGeometry(self, pars=None ): gx,gy,gz = ImageD11.transform.compute_g_vectors """ if pars is not None: - self.setparameters( pars ) + self.setparameters(pars) pars = self.parameters if "sc" in self.titles and "fc" in self.titles: pks = self.sc, self.fc @@ -424,57 +437,65 @@ def updateGeometry(self, pars=None ): pks = self.xc, self.yc else: raise Exception("columnfile file misses xc/yc or sc/fc") - xl,yl,zl = transform.compute_xyz_lab( pks, - **pars.parameters) - peaks_xyz = np.array((xl,yl,zl)) - assert "omega" in self.titles,"No omega column" - om = self.omega * float( pars.get("omegasign") ) - tth, eta = transform.compute_tth_eta_from_xyz( - peaks_xyz, om, - **pars.parameters) + xl, yl, zl = transform.compute_xyz_lab(pks, **pars.parameters) + peaks_xyz = np.array((xl, yl, zl)) + assert "omega" in self.titles, "No omega column" + om = self.omega * float(pars.get("omegasign")) + tth, eta = transform.compute_tth_eta_from_xyz(peaks_xyz, om, **pars.parameters) gx, gy, gz = transform.compute_g_vectors( - tth, eta, om, - wvln = pars.get("wavelength"), - wedge = pars.get("wedge"), - chi = pars.get("chi") ) - modg = np.sqrt( gx * gx + gy * gy + gz * gz ) - self.addcolumn(xl,"xl") - self.addcolumn(yl,"yl") - self.addcolumn(zl,"zl") - self.addcolumn(tth, "tth", ) - self.addcolumn(eta, "eta", ) + tth, + eta, + om, + wvln=pars.get("wavelength"), + 
wedge=pars.get("wedge"), + chi=pars.get("chi"), + ) + modg = np.sqrt(gx * gx + gy * gy + gz * gz) + self.addcolumn(xl, "xl") + self.addcolumn(yl, "yl") + self.addcolumn(zl, "zl") + self.addcolumn( + tth, + "tth", + ) + self.addcolumn( + eta, + "eta", + ) self.addcolumn(gx, "gx") self.addcolumn(gy, "gy") self.addcolumn(gz, "gz") - self.addcolumn(modg, "ds") # dstar - + self.addcolumn(modg, "ds") # dstar class newcolumnfile(columnfile): - """ Just like a columnfile, but for creating new - files """ + """Just like a columnfile, but for creating new + files""" + def __init__(self, titles): columnfile.__init__(self, filename=None, new=True) self.titles = titles self.ncols = len(titles) - - -def colfile_from_dict( c ): - """ convert from a dictonary of numpy arrays """ + + +def colfile_from_dict(c): + """convert from a dictonary of numpy arrays""" titles = list(c.keys()) nrows = len(c[titles[0]]) for t in titles: assert len(c[t]) == nrows, t - colf = newcolumnfile( titles=titles ) + colf = newcolumnfile(titles=titles) colf.nrows = nrows - colf.set_bigarray( [ c[t] for t in titles ] ) + colf.set_bigarray([c[t] for t in titles]) return colf try: import h5py, os - def colfile_to_hdf( colfile, hdffile, name=None, compression=None, - compression_opts=None): + + def colfile_to_hdf( + colfile, hdffile, name=None, compression=None, compression_opts=None + ): """ Copy a columnfile into hdf file FIXME TODO - add the parameters somewhere (attributes??) @@ -482,146 +503,192 @@ def colfile_to_hdf( colfile, hdffile, name=None, compression=None, if isinstance(colfile, columnfile): c = colfile else: - c = columnfile( colfile ) + c = columnfile(colfile) if isinstance(hdffile, h5py.File): h = hdffile opened = False else: - h = h5py.File( hdffile , 'a') # Appending if exists + h = h5py.File(hdffile, "a") # Appending if exists opened = True if name is None: # Take the file name try: name = os.path.split(c.filename)[-1] except: - name = 'peaks' + name = "peaks" if name in list(h.keys()): g = h[name] else: - g = h.create_group( name ) - g.attrs['ImageD11_type'] = 'peaks' + g = h.create_group(name) + g.attrs["ImageD11_type"] = "peaks" for t in c.titles: if t in INTS: ty = np.int32 else: ty = np.float32 # print "adding",t,ty - dat = getattr(c, t).astype( ty ) + dat = getattr(c, t).astype(ty) if t in list(g.keys()): if g[t].shape != dat.shape: - g[t].resize( dat.shape ) + g[t].resize(dat.shape) g[t][:] = dat else: - g.create_dataset( t, data = dat, - compression=compression, - compression_opts=compression_opts ) + g.create_dataset( + t, + data=dat, + compression=compression, + compression_opts=compression_opts, + ) if opened: h.close() - def colfileobj_to_hdf( cf, hdffile, name=None): + def colfileobj_to_hdf(cf, hdffile, name=None): """ Save a columnfile into hdf file format FIXME TODO - add the parameters somewhere (attributes??) """ - h = h5py.File(hdffile, 'a' ) + h = h5py.File(hdffile, "a") if name is None: name = str(cf.filename) try: - g = h.create_group( name ) + g = h.create_group(name) except: print(name, h) raise - g.attrs['ImageD11_type'] = 'peaks' + g.attrs["ImageD11_type"] = "peaks" for t in cf.titles: if t in INTS: ty = np.int32 else: ty = np.float32 - g.create_dataset( t, data = getattr(cf, t).astype( ty ) ) + g.create_dataset(t, data=getattr(cf, t).astype(ty)) h.close() - def colfile_from_hdf( hdffile , name=None, obj=None ): + def colfile_from_hdf(hdffile, name=None, obj=None): """ Read a columnfile from a hdf file FIXME TODO - add the parameters somewhere (attributes??) 
""" - import time - h = h5py.File( hdffile, 'r' ) - if hasattr(h, 'listnames'): + + h = h5py.File(hdffile, "r") + if hasattr(h, "listnames"): groups = h.listnames() - else: # API changed + else: # API changed groups = list(h.keys()) if name is not None: if name in groups: g = h[name] else: print(groups) - raise Exception("Did not find your "+str(name)+" in "+hdffile) + raise Exception("Did not find your " + str(name) + " in " + hdffile) else: - groups = [g for g in groups - if 'ImageD11_type' in h[g].attrs and - h[g].attrs['ImageD11_type'] in ('peaks', b'peaks') ] - assert len(groups) == 1, "Your hdf file has many groups. Which one??"+str(groups) + groups = [ + g + for g in groups + if "ImageD11_type" in h[g].attrs + and h[g].attrs["ImageD11_type"] in ("peaks", b"peaks") + ] + assert len(groups) == 1, "Your hdf file has many groups. Which one??" + str( + groups + ) g = h[groups[0]] name = groups[0] - if hasattr(g, 'listnames'): + if hasattr(g, "listnames"): titles = g.listnames() - else: # API changed + else: # API changed titles = list(g.keys()) otitles = [t for t in titles] otitles.sort() newtitles = [] # Put them back in the order folks might have hard wired in their # programs - for t in ['sc', 'fc', 'omega' , 'Number_of_pixels', 'avg_intensity', - 's_raw', 'f_raw', 'sigs', 'sigf', 'covsf' , 'sigo', 'covso', - 'covfo', 'sum_intensity', 'sum_intensity^2', 'IMax_int', 'IMax_s', - 'IMax_f', 'IMax_o', 'Min_s', 'Max_s', 'Min_f', 'Max_f', 'Min_o', - 'Max_o', 'dety', 'detz', 'onfirst', 'onlast', 'spot3d_id', 'xl', - 'yl', 'zl', 'tth', 'eta', 'gx', 'gy', 'gz']: + for t in [ + "sc", + "fc", + "omega", + "Number_of_pixels", + "avg_intensity", + "s_raw", + "f_raw", + "sigs", + "sigf", + "covsf", + "sigo", + "covso", + "covfo", + "sum_intensity", + "sum_intensity^2", + "IMax_int", + "IMax_s", + "IMax_f", + "IMax_o", + "Min_s", + "Max_s", + "Min_f", + "Max_f", + "Min_o", + "Max_o", + "dety", + "detz", + "onfirst", + "onlast", + "spot3d_id", + "xl", + "yl", + "zl", + "tth", + "eta", + "gx", + "gy", + "gz", + ]: if t in otitles: newtitles.append(t) otitles.remove(t) # Anything else goes in alphabetically [newtitles.append(t) for t in otitles] - assert len(newtitles) == len( titles ) + assert len(newtitles) == len(titles) if obj is None: - col = columnfile( filename=name, new=True ) + col = columnfile(filename=name, new=True) else: col = obj - col.nrows = len( g[newtitles[0]] ) + col.nrows = len(g[newtitles[0]]) for name in newtitles: - col.addcolumn( g[name][:].copy(), name ) + col.addcolumn(g[name][:].copy(), name) h.close() return col except ImportError: + def hdferr(): raise Exception("You do not have h5py installed!") - def colfile_to_hdf( a,b,name=None): + def colfile_to_hdf(a, b, name=None): hdferr() - def colfile_from_hdf( hdffile , name=None ): + def colfile_from_hdf(hdffile, name=None): hdferr() - def colfileobj_to_hdf( cf, hdffile, name=None): + def colfileobj_to_hdf(cf, hdffile, name=None): hdferr() + def bench(): """ Compares the timing for reading with columfile versus np.loadtxt """ import sys, time + start = time.time() import cProfile, pstats + pr = cProfile.Profile() pr.enable() colf = columnfile(sys.argv[1]) pr.disable() - ps = pstats.Stats(pr, stream=sys.stdout ) - ps.sort_stats('tottime') + ps = pstats.Stats(pr, stream=sys.stdout) + ps.sort_stats("tottime") ps.reverse_order() print(colf.bigarray.shape) print("ImageD11", time.time() - start) @@ -633,25 +700,24 @@ def bench(): # os.system("time -p ./a.out") - - import sqlite3 as database_module + # Perhaps other modules follow the 
same api. # Doubtless one does not get away with using a filename? -def colfile2db( colfilename, dbname ): +def colfile2db(colfilename, dbname): """ Read the columnfile into a database Ignores parameter metadata (not used yet) """ - colf = columnfile( colfilename ) - dbo = database_module.connect( dbname ) + colf = columnfile(colfilename) + dbo = database_module.connect(dbname) curs = dbo.cursor() # Build up columnames and types to make table tablecols = [] # Not allowed for sql to have ^ in string - colf.titles = [t.replace("^","_pow_") for t in colf.titles] + colf.titles = [t.replace("^", "_pow_") for t in colf.titles] for name in colf.titles: if name in INTS: tablecols.append(name + " INTEGER") @@ -660,17 +726,16 @@ def colfile2db( colfilename, dbname ): tablecols.append(name + " REAL") continue tablecols.append(name + " REAL") - curs.execute("create table peaks \n( " + \ - " , ".join(tablecols) + " ) ; \n" ) + curs.execute("create table peaks \n( " + " , ".join(tablecols) + " ) ; \n") # Make a format string for inserting data - ins = "insert into peaks values (" + \ - ",".join(["?"]*colf.ncols) +") ;" + ins = "insert into peaks values (" + ",".join(["?"] * colf.ncols) + ") ;" # insert the data for i in range(colf.nrows): - curs.execute( ins , tuple(colf.bigarray[:, i]) ) + curs.execute(ins, tuple(colf.bigarray[:, i])) curs.close() dbo.commit() dbo.close() + if __name__ == "__main__": bench() diff --git a/ImageD11/compute_fazit.py b/ImageD11/compute_fazit.py index a3761bce..1837f39d 100644 --- a/ImageD11/compute_fazit.py +++ b/ImageD11/compute_fazit.py @@ -1,4 +1,3 @@ - from __future__ import print_function @@ -23,12 +22,12 @@ class xydisp: - required_pars = ["wavelength", "distance", # etc - ] - - - def __init__(self, splinefile = None, - parfile = None): + required_pars = [ + "wavelength", + "distance", # etc + ] + + def __init__(self, splinefile=None, parfile=None): """ splinefile = fit2d spline file, or None for images that are already corrected @@ -38,41 +37,40 @@ def __init__(self, splinefile = None, plugin. 
""" self.splinefile = splinefile - + if self.splinefile is None: self.spatial = blobcorrector.perfect() else: - self.spatial = blobcorrector.correctorclass( splinefile ) + self.spatial = blobcorrector.correctorclass(splinefile) self.parfile = parfile self.pars = parameters.parameters() - self.pars.loadparameters( parfile ) + self.pars.loadparameters(parfile) for key in self.required_pars: - + if key not in self.pars.parameters: - raise Exception("Missing parameter "+str(key)) + raise Exception("Missing parameter " + str(key)) def compute_tth_eta(self, dims): """ Find the twotheta and azimuth images """ assert len(dims) == 2 - xim, yim = self.spatial.make_pixel_lut( dims ) + xim, yim = self.spatial.make_pixel_lut(dims) self.dims = dims - peaks = [ numpy.ravel(xim), numpy.ravel(yim) ] - tth, eta = transform.compute_tth_eta( peaks, - **self.pars.get_parameters() ) - assert len(tth) == dims[0]*dims[1] - assert len(eta) == dims[0]*dims[1] + peaks = [numpy.ravel(xim), numpy.ravel(yim)] + tth, eta = transform.compute_tth_eta(peaks, **self.pars.get_parameters()) + assert len(tth) == dims[0] * dims[1] + assert len(eta) == dims[0] * dims[1] # Now we have the twotheta and azimuth images in memory # they are in degrees - - self.tth = numpy.reshape( tth, dims ) -# self.eta = numpy.mod(numpy.reshape( eta, dims ), 360)-180 - self.eta = numpy.reshape( eta, dims )-eta.mean() + + self.tth = numpy.reshape(tth, dims) + # self.eta = numpy.mod(numpy.reshape( eta, dims ), 360)-180 + self.eta = numpy.reshape(eta, dims) - eta.mean() self.compute_rad_arc() - + def compute_rad_arc(self): """ This part needs more work - how to properly define the output @@ -87,26 +85,26 @@ def compute_rad_arc(self): arclength = tth_rad * eta_rad # x-axis, eg [0], is tth - tthmax = numpy.max( self.tth ) - tthmin = numpy.min( self.tth ) - tthstep = (tthmax - tthmin)/(self.dims[0] - 1) - self.tthbin = numpy.floor( (self.tth - tthmin)/tthstep ) - self.tthvals = numpy.arange(tthmin,tthmax+tthstep*0.5,tthstep) + tthmax = numpy.max(self.tth) + tthmin = numpy.min(self.tth) + tthstep = (tthmax - tthmin) / (self.dims[0] - 1) + self.tthbin = numpy.floor((self.tth - tthmin) / tthstep) + self.tthvals = numpy.arange(tthmin, tthmax + tthstep * 0.5, tthstep) # Ideally we want the arc bins to vary with tth? 
- #arcmax = numpy.max( arclength ) - #arcmin = numpy.min( arclength ) + # arcmax = numpy.max( arclength ) + # arcmin = numpy.min( arclength ) # 4 corners of image arcmin = arclength.min() arcmax = arclength.max() - #from matplotlib.pylab import imshow,show, colorbar - #imshow(arclength) - #colorbar() - #show() - arcstep = (arcmax - arcmin)/(self.dims[1] - 1) - arcmid = 0.5*(arcmax+arcmin) + # from matplotlib.pylab import imshow,show, colorbar + # imshow(arclength) + # colorbar() + # show() + arcstep = (arcmax - arcmin) / (self.dims[1] - 1) + arcmid = 0.5 * (arcmax + arcmin) # Make integer pixel id images - self.arcbin = numpy.floor((arclength - arcmid)/arcstep )+self.dims[1]/2 + self.arcbin = numpy.floor((arclength - arcmid) / arcstep) + self.dims[1] / 2 assert self.tthbin.min() >= 0 assert self.tthbin.max() < self.dims[0], self.tthbin.max() @@ -116,7 +114,7 @@ def compute_rad_arc(self): # Now convert these to displacements compared to input image # Use the same code as for the spline case to get the "x/y" images ideal = blobcorrector.perfect() - idealx, idealy = ideal.make_pixel_lut( self.dims ) + idealx, idealy = ideal.make_pixel_lut(self.dims) self.dx = self.tthbin - idealx self.dy = self.arcbin - idealy @@ -126,34 +124,63 @@ def write(self, stemname): """ im = edfimage.edfimage() im.data = self.dx - im.write( "%s_dx.edf"%(stemname), force_type = numpy.float32) + im.write("%s_dx.edf" % (stemname), force_type=numpy.float32) im = edfimage.edfimage() im.data = self.dy - im.write( "%s_dy.edf"%(stemname), force_type = numpy.float32) - numpy.save("%s_tth.npy"%(stemname),self.tthvals) + im.write("%s_dy.edf" % (stemname), force_type=numpy.float32) + numpy.save("%s_tth.npy" % (stemname), self.tthvals) + def get_options(parser): - parser.add_option("-p", "--pars", action="store",type="string", - dest = "pars", default = None, - help = "ImageD11 parameter file for experiment") - - parser.add_option("-o", "--output", action="store", type="string", - dest = "output", default = None, - help = "stem name for output x/y edf images") - - parser.add_option("-s", "--splinefile", action="store", type="string", - dest = "spline", default = None, - help = "Name of fit2d spline file for spatial dist") - - parser.add_option("--nf", action="store", type="int", - dest = "nf", default = 2048, - help = "Number of pixels in fast direction, eg 2048") - - parser.add_option("--ns", action="store", type="int", - dest = "ns", default = 2048, - help = "Number of pixels in slow direction, eg 2048") - + parser.add_option( + "-p", + "--pars", + action="store", + type="string", + dest="pars", + default=None, + help="ImageD11 parameter file for experiment", + ) + + parser.add_option( + "-o", + "--output", + action="store", + type="string", + dest="output", + default=None, + help="stem name for output x/y edf images", + ) + + parser.add_option( + "-s", + "--splinefile", + action="store", + type="string", + dest="spline", + default=None, + help="Name of fit2d spline file for spatial dist", + ) + + parser.add_option( + "--nf", + action="store", + type="int", + dest="nf", + default=2048, + help="Number of pixels in fast direction, eg 2048", + ) + + parser.add_option( + "--ns", + action="store", + type="int", + dest="ns", + default=2048, + help="Number of pixels in slow direction, eg 2048", + ) + return parser @@ -161,14 +188,15 @@ def main(): """ A CLI interface """ - import sys, time, os, logging - start = time.time() - root = logging.getLogger('') - root.setLevel( logging.WARNING ) + import sys, os, logging + + root = 
logging.getLogger("") + root.setLevel(logging.WARNING) try: from optparse import OptionParser + parser = OptionParser() - parser = get_options( parser ) + parser = get_options(parser) options, args = parser.parse_args() except SystemExit: raise @@ -177,23 +205,18 @@ def main(): print("\nSorry, there was a problem interpreting your command line") raise - if options.pars is None: print("Failed: You must supply a parameters file, -p option") sys.exit() - if not os.path.exists( options.pars ): - print("Cannot find your file",options.pars) + if not os.path.exists(options.pars): + print("Cannot find your file", options.pars) sys.exit() - worker = xydisp( - splinefile = options.spline, - parfile = options.pars - ) - - worker.compute_tth_eta( (options.nf, options.ns) ) - worker.write( options.output ) - + worker = xydisp(splinefile=options.spline, parfile=options.pars) + + worker.compute_tth_eta((options.nf, options.ns)) + worker.write(options.output) + - if __name__ == "__main__": main() diff --git a/ImageD11/correct.py b/ImageD11/correct.py index 9ac4e649..d6997c1e 100644 --- a/ImageD11/correct.py +++ b/ImageD11/correct.py @@ -1,4 +1,3 @@ - from __future__ import print_function @@ -6,12 +5,21 @@ from PIL import ImageFilter # These don't work -filternames = [ "BLUR", "CONTOUR", "DETAIL", "EDGE_ENHANCE", - "EDGE_ENHANCE_MORE", "EMBOSS", "FIND_EDGES", "SMOOTH", - "SMOOTH_MORE", "SHARPEN"] +filternames = [ + "BLUR", + "CONTOUR", + "DETAIL", + "EDGE_ENHANCE", + "EDGE_ENHANCE_MORE", + "EMBOSS", + "FIND_EDGES", + "SMOOTH", + "SMOOTH_MORE", + "SHARPEN", +] filters = {} for f in filternames: - filters[f] = getattr( ImageFilter, f ) + filters[f] = getattr(ImageFilter, f) filternames.append("MedianFilter(3)") filters["MedianFilter(3)"] = ImageFilter.MedianFilter(3) @@ -21,13 +29,16 @@ # fixme - subtracting median filtered # coarser medians - eg rebinned too -def correct(data_object, - dark = None, - flood = None, - do_median = False, - monitorval = None, - monitorcol = None, - filterlist = [] ): + +def correct( + data_object, + dark=None, + flood=None, + do_median=False, + monitorval=None, + monitorcol=None, + filterlist=[], +): """ Does the dark and flood corrections Also PIL filters @@ -35,49 +46,48 @@ def correct(data_object, picture = data_object.data.astype(numpy.float32) if dark is not None: # This is meant to be quicker - picture = numpy.subtract( picture , dark, picture ) + picture = numpy.subtract(picture, dark, picture) data_object.data = picture if flood is not None: - picture = numpy.divide( picture, flood, picture ) + picture = numpy.divide(picture, flood, picture) data_object.data = picture if monitorcol is not None and monitorval is not None: if monitorcol not in data_object.header: - print("Missing header value for normalise",monitorcol,\ - data_object.filename) + print( + "Missing header value for normalise", monitorcol, data_object.filename + ) else: try: - scal = monitorval / float( data_object.header[monitorcol] ) - picture = numpy.multiply( picture, scal, picture ) + scal = monitorval / float(data_object.header[monitorcol]) + picture = numpy.multiply(picture, scal, picture) data_object.data = picture - #print "scaled",scal, + # print "scaled",scal, except: - print("Scale overflow",monitorcol, monitorval, data_object.filename) + print("Scale overflow", monitorcol, monitorval, data_object.filename) if do_median: # We do this after corrections # The expectation is that this is a median on the azimuth # direction of a previously radially transformed image # Gives the liquid contribution - 
med = numpy.median( picture ) + med = numpy.median(picture) # FIXME - if True: # Suboption - save the median or not? - obj = fabio.deconstruct_filename( data_object.header['filename'] ) + if True: # Suboption - save the median or not? + obj = fabio.deconstruct_filename(data_object.header["filename"]) obj.extension = ".bkm" medfilename = obj.tostring() - med.tofile( medfilename , sep = "\n") - picture = numpy.subtract( picture , med, picture ) + med.tofile(medfilename, sep="\n") + picture = numpy.subtract(picture, med, picture) # Apply series of PIL filters - if len( filterlist ) > 0: + if len(filterlist) > 0: pim = data_object.toPIL16() - print("Applied", end=' ') + print("Applied", end=" ") for item in filterlist: if item in filternames: try: - pim = pim.filter( filters[ item ]) + pim = pim.filter(filters[item]) except: raise - print(item, end=' ') - data_object.data = numpy.array( pim ) + print(item, end=" ") + data_object.data = numpy.array(pim) return data_object - - diff --git a/ImageD11/eps_sig_solver.py b/ImageD11/eps_sig_solver.py index dbf4648e..8f05b669 100644 --- a/ImageD11/eps_sig_solver.py +++ b/ImageD11/eps_sig_solver.py @@ -1,4 +1,3 @@ - from __future__ import print_function # Get Strain/Stress from ImageD11 UBI/map files @@ -19,66 +18,84 @@ # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA import numpy as np -import math -from ImageD11 import transform, unitcell, columnfile -from ImageD11.parameters import par, parameters +from ImageD11.parameters import parameters from ImageD11.grain import read_grain_file + try: - from FitAllB.conversion import grain2sample, strain2stress,formStiffnessMV + from FitAllB.conversion import strain2stress, formStiffnessMV except: - print( "You need to install FitAllB") - print( "You will get error messages if you try to compute strain!") -from xfab.tools import ubi_to_u_and_eps,ubi_to_cell + print("You need to install FitAllB") + print("You will get error messages if you try to compute strain!") +from xfab.tools import ubi_to_u_and_eps, ubi_to_cell + def readubis(ubifile): - """read ubifile and return a list of ubi arrays """ + """read ubifile and return a list of ubi arrays""" f = open(ubifile, "r") ubisread = [] u = [] for line in f: - if line[0]=="#": + if line[0] == "#": continue - vals = [ float(x) for x in line.split() ] + vals = [float(x) for x in line.split()] if len(vals) == 3: u = u + [vals] - if len(u)==3: + if len(u) == 3: ubisread.append(np.array(u)) u = [] f.close() return ubisread + def write_ubi_file(filename, ubilist): - """ save 3x3 matrices into file """ - f=open(filename,"w") + """save 3x3 matrices into file""" + f = open(filename, "w") for u in ubilist: - f.write("%f %f %f\n" %(u[0][0],u[0][1],u[0][2])) - f.write("%f %f %f\n" %(u[1][0],u[1][1],u[1][2])) - f.write("%f %f %f\n\n"%(u[2][0],u[2][1],u[2][2])) - f.close() - + f.write("%f %f %f\n" % (u[0][0], u[0][1], u[0][2])) + f.write("%f %f %f\n" % (u[1][0], u[1][1], u[1][2])) + f.write("%f %f %f\n\n" % (u[2][0], u[2][1], u[2][2])) + f.close() class solver: """ A class for getting strain and stress tensors """ - def __init__(self, - unitcell=None, - ubis=None, - crystal_symmetry=None, - c11=None,c12=None,c13=None,c14=None,c15=None,c16=None, - c22=None,c23=None,c24=None,c25=None,c26=None, - c33=None,c34=None,c35=None,c36=None, - c44=None,c45=None,c46=None, - c55=None,c56=None, - c66=None): + + def __init__( + self, + unitcell=None, + ubis=None, + crystal_symmetry=None, + c11=None, + c12=None, + c13=None, + c14=None, + c15=None, + c16=None, + c22=None, + 
c23=None, + c24=None, + c25=None, + c26=None, + c33=None, + c34=None, + c35=None, + c36=None, + c44=None, + c45=None, + c46=None, + c55=None, + c56=None, + c66=None, + ): """ unitcell would be a list of six elements [a, b, c, alpha, beta, gamma] ubis would be a list of orientation matrices as by ImageD11 convention symmetry would be isotropic, cubic, tetragonal_high... (see FitAllB.conversion) to form the stiffness tensor C The rest of the arguments are parameters. """ - + self.cell__a = None self.cell__b = None self.cell__c = None @@ -86,7 +103,7 @@ def __init__(self, self.cell_beta = None self.cell_gamma = None if unitcell is not None: - if len(unitcell)==6: + if len(unitcell) == 6: self.cell__a = unitcell[0] self.cell__b = unitcell[1] self.cell__c = unitcell[2] @@ -95,147 +112,198 @@ def __init__(self, self.cell_gamma = unitcell[5] else: raise Exception("The unit cell must be defined by six parameters!") - self.ubis=ubis - self.crystal_symmetry=crystal_symmetry - self.c11=c11 - self.c12=c12 - self.c13=c13 - self.c14=c14 - self.c15=c15 - self.c16=c16 - self.c22=c22 - self.c23=c23 - self.c24=c24 - self.c25=c25 - self.c26=c26 - self.c33=c33 - self.c34=c34 - self.c35=c35 - self.c36=c36 - self.c44=c44 - self.c45=c45 - self.c46=c46 - self.c55=c55 - self.c56=c56 - self.c66=c66 - self.parameterobj = parameters(cell__a=self.cell__a, - cell__b=self.cell__b, - cell__c=self.cell__c, - cell_alpha=self.cell_alpha, - cell_beta=self.cell_beta, - cell_gamma=self.cell_gamma, - crystal_symmetry=self.crystal_symmetry, - c11=self.c11,c12=self.c12,c13=self.c13,c14=self.c14,c15=self.c15,c16=self.c16, - c22=self.c22,c23=self.c23,c24=self.c24,c25=self.c25,c26=self.c26, - c33=self.c33,c34=self.c34,c35=self.c35,c36=self.c36, - c44=self.c44,c45=self.c45,c46=self.c46, - c55=self.c55,c56=self.c56, - c66=self.c66) - self.epsilon=[] - self.sigma=[] - - - def loadmap(self,filename): + self.ubis = ubis + self.crystal_symmetry = crystal_symmetry + self.c11 = c11 + self.c12 = c12 + self.c13 = c13 + self.c14 = c14 + self.c15 = c15 + self.c16 = c16 + self.c22 = c22 + self.c23 = c23 + self.c24 = c24 + self.c25 = c25 + self.c26 = c26 + self.c33 = c33 + self.c34 = c34 + self.c35 = c35 + self.c36 = c36 + self.c44 = c44 + self.c45 = c45 + self.c46 = c46 + self.c55 = c55 + self.c56 = c56 + self.c66 = c66 + self.parameterobj = parameters( + cell__a=self.cell__a, + cell__b=self.cell__b, + cell__c=self.cell__c, + cell_alpha=self.cell_alpha, + cell_beta=self.cell_beta, + cell_gamma=self.cell_gamma, + crystal_symmetry=self.crystal_symmetry, + c11=self.c11, + c12=self.c12, + c13=self.c13, + c14=self.c14, + c15=self.c15, + c16=self.c16, + c22=self.c22, + c23=self.c23, + c24=self.c24, + c25=self.c25, + c26=self.c26, + c33=self.c33, + c34=self.c34, + c35=self.c35, + c36=self.c36, + c44=self.c44, + c45=self.c45, + c46=self.c46, + c55=self.c55, + c56=self.c56, + c66=self.c66, + ) + self.epsilon = [] + self.sigma = [] + + def loadmap(self, filename): try: - self.map=read_grain_file(filename) - self.ubis=[x.ubi for x in self.map] + self.map = read_grain_file(filename) + self.ubis = [x.ubi for x in self.map] except: - print("error when reading %s\n",filename) + print("error when reading %s\n", filename) raise - - - def loadpars(self,filename=None): + + def loadpars(self, filename=None): if filename is not None: self.parameterobj.loadparameters(filename) self.parameterobj.update_other(self) - #update also the unitcell list (because the element are included in parameterobj but not the list): - #self.unitcell=[self.cell__a, self.cell__b, 
self.cell__c, self.cell_alpha, self.cell_beta, self.cell_gamma] + # update also the unitcell list (because the element are included in parameterobj but not the list): + # self.unitcell=[self.cell__a, self.cell__b, self.cell__c, self.cell_alpha, self.cell_beta, self.cell_gamma] def updateparameters(self): self.savepars() - self.pars=self.parameterobj.parameters - #update also the unitcell list (because the element are included in parameterobj but not the list): - #self.unitcell=[self.cell__a, self.cell__b, self.cell__c, self.cell_alpha, self.cell_beta, self.cell_gamma] + self.pars = self.parameterobj.parameters + # update also the unitcell list (because the element are included in parameterobj but not the list): + # self.unitcell=[self.cell__a, self.cell__b, self.cell__c, self.cell_alpha, self.cell_beta, self.cell_gamma] - def savepars(self,filename=None): + def savepars(self, filename=None): self.parameterobj.update_yourself(self) if filename is not None: self.parameterobj.saveparameters(filename) - - + def unitcell(self): - return [self.cell__a, self.cell__b, self.cell__c, self.cell_alpha, self.cell_beta, self.cell_gamma] - - def setunitcell(self,uc): - """ this is used to set all the unit cell elements in one shot """ + return [ + self.cell__a, + self.cell__b, + self.cell__c, + self.cell_alpha, + self.cell_beta, + self.cell_gamma, + ] + + def setunitcell(self, uc): + """this is used to set all the unit cell elements in one shot""" self.cell__a = uc[0] self.cell__b = uc[1] self.cell__c = uc[2] self.cell_alpha = uc[3] self.cell_beta = uc[4] self.cell_gamma = uc[5] - - + def MVStiffness(self): - return formStiffnessMV(crystal_system=self.crystal_symmetry, - c11=self.c11,c12=self.c12,c13=self.c13,c14=self.c14,c15=self.c15,c16=self.c16, - c22=self.c22,c23=self.c23,c24=self.c24,c25=self.c25,c26=self.c26, - c33=self.c33,c34=self.c34,c35=self.c35,c36=self.c36, - c44=self.c44,c45=self.c45,c46=self.c46, - c55=self.c55,c56=self.c56, - c66=self.c66) - - - def compute_write_eps_sig(self,outputfile): - """ Compute strain and stress in crystal and sample co-ordinates system """ - + return formStiffnessMV( + crystal_system=self.crystal_symmetry, + c11=self.c11, + c12=self.c12, + c13=self.c13, + c14=self.c14, + c15=self.c15, + c16=self.c16, + c22=self.c22, + c23=self.c23, + c24=self.c24, + c25=self.c25, + c26=self.c26, + c33=self.c33, + c34=self.c34, + c35=self.c35, + c36=self.c36, + c44=self.c44, + c45=self.c45, + c46=self.c46, + c55=self.c55, + c56=self.c56, + c66=self.c66, + ) + + def compute_write_eps_sig(self, outputfile): + """Compute strain and stress in crystal and sample co-ordinates system""" + if self.ubis is not None: - + writestress = True - - f = open(outputfile,'w') - ''' the used parameters will be the header of the output file''' - for k,v in sorted(self.parameterobj.parameters.items()): - f.write(("%s %s\n")%(k,v)) - ''' write titles''' + + f = open(outputfile, "w") + """ the used parameters will be the header of the output file""" + for k, v in sorted(self.parameterobj.parameters.items()): + f.write(("%s %s\n") % (k, v)) + """ write titles""" f.write("##############################################\n") - f.write("cell__a cell__b cell__c cell_alpha cell_beta cell_gamma u11 u12 u13 u21 u22 u23 u31 u32 u33 ") - f.write("eps11_c eps22_c eps33_c eps12_c eps13_c eps23_c eps11_s eps22_s eps33_s eps12_s eps13_s eps23_s ") - f.write("sig11_c sig22_c sig33_c sig12_c sig13_c sig23_c sig11_s sig22_s sig33_s sig12_s sig13_s sig23_s\n") - - '''this is the part where we compute strain and stress''' + 
f.write( + "cell__a cell__b cell__c cell_alpha cell_beta cell_gamma u11 u12 u13 u21 u22 u23 u31 u32 u33 " + ) + f.write( + "eps11_c eps22_c eps33_c eps12_c eps13_c eps23_c eps11_s eps22_s eps33_s eps12_s eps13_s eps23_s " + ) + f.write( + "sig11_c sig22_c sig33_c sig12_c sig13_c sig23_c sig11_s sig22_s sig33_s sig12_s sig13_s sig23_s\n" + ) + + """this is the part where we compute strain and stress""" for ubi in self.ubis: U, eps = ubi_to_u_and_eps(ubi, self.unitcell()) - #U, eps = ubi_to_u_and_eps(ubi, ubi_to_cell(self.ubis[0])) - epsM = [ [ eps[0], eps[1], eps[2] ], #write the strain tensor list as a matrix - [ eps[1], eps[3], eps[4] ], - [ eps[2], eps[4], eps[5] ] ] - epsS = np.dot( U, np.dot( epsM, U.T ) ) #epsilon in sample co-ordinates - sigM= np.empty((3,3)) - sigS= np.empty((3,3)) + # U, eps = ubi_to_u_and_eps(ubi, ubi_to_cell(self.ubis[0])) + epsM = [ + [ + eps[0], + eps[1], + eps[2], + ], # write the strain tensor list as a matrix + [eps[1], eps[3], eps[4]], + [eps[2], eps[4], eps[5]], + ] + epsS = np.dot(U, np.dot(epsM, U.T)) # epsilon in sample co-ordinates + sigM = np.empty((3, 3)) + sigS = np.empty((3, 3)) try: - sigM = strain2stress( np.array(epsM), self.MVStiffness() ) #write the stress tensor as a symmetric matrix in crystal co-ordinates - sigS = np.dot( U, np.dot( sigM, U.T ) ) #sigma in sample co-ordinates + sigM = strain2stress( + np.array(epsM), self.MVStiffness() + ) # write the stress tensor as a symmetric matrix in crystal co-ordinates + sigS = np.dot(U, np.dot(sigM, U.T)) # sigma in sample co-ordinates except: - print("couldn't compute stress! please check the crystal_symmetry parameters and elastic constants") + print( + "couldn't compute stress! please check the crystal_symmetry parameters and elastic constants" + ) writestress = False - - - ''' writing down the results''' - - f.write(("%f "*6)%tuple(ubi_to_cell(ubi))) - f.write(("%f "*9)%tuple(U.ravel())) + + """ writing down the results""" + + f.write(("%f " * 6) % tuple(ubi_to_cell(ubi))) + f.write(("%f " * 9) % tuple(U.ravel())) ligne = "" - for i,j in [(0,0),(1,1),(2,2),(0,1),(0,2),(1,2)]: - ligne = ligne + " " + str(100.*np.array(epsM)[i,j]) - for i,j in [(0,0),(1,1),(2,2),(0,1),(0,2),(1,2)]: - ligne = ligne + " " + str(100.*epsS[i,j]) - if writestress==True: - for i,j in [(0,0),(1,1),(2,2),(0,1),(0,2),(1,2)]: - ligne = ligne + " " + str(np.array(sigM)[i,j]) - for i,j in [(0,0),(1,1),(2,2),(0,1),(0,2),(1,2)]: - ligne = ligne + " " + str(sigS[i,j]) - + for i, j in [(0, 0), (1, 1), (2, 2), (0, 1), (0, 2), (1, 2)]: + ligne = ligne + " " + str(100.0 * np.array(epsM)[i, j]) + for i, j in [(0, 0), (1, 1), (2, 2), (0, 1), (0, 2), (1, 2)]: + ligne = ligne + " " + str(100.0 * epsS[i, j]) + if writestress is True: + for i, j in [(0, 0), (1, 1), (2, 2), (0, 1), (0, 2), (1, 2)]: + ligne = ligne + " " + str(np.array(sigM)[i, j]) + for i, j in [(0, 0), (1, 1), (2, 2), (0, 1), (0, 2), (1, 2)]: + ligne = ligne + " " + str(sigS[i, j]) + f.write(ligne[1:]) f.write("\n") f.close() diff --git a/ImageD11/fft_index_refac.py b/ImageD11/fft_index_refac.py index 730bb726..f9ca95fc 100644 --- a/ImageD11/fft_index_refac.py +++ b/ImageD11/fft_index_refac.py @@ -1,5 +1,3 @@ - - from __future__ import print_function @@ -17,29 +15,39 @@ from ImageD11 import labelimage, cImageD11, columnfile + def get_options(parser): - parser.add_argument( '-n', '--ngrid', - action = 'store', - dest = 'npx', - type = int, - help = 'number of points in the fft grid [128]', - default = 128 ) - parser.add_argument( '-r', '--max_res', - action = 'store', - 
dest = 'mr', - type = float, - help = 'Maximum resolution limit for fft (d-spacing) [1.0]', - default = 1.0) - parser.add_argument( '-s', '--nsig', - action = 'store', - dest = 'nsig', - type = float, - help = 'Number of sigma for patterson peaksearch threshold [5]', - default = 5) + parser.add_argument( + "-n", + "--ngrid", + action="store", + dest="npx", + type=int, + help="number of points in the fft grid [128]", + default=128, + ) + parser.add_argument( + "-r", + "--max_res", + action="store", + dest="mr", + type=float, + help="Maximum resolution limit for fft (d-spacing) [1.0]", + default=1.0, + ) + parser.add_argument( + "-s", + "--nsig", + action="store", + dest="nsig", + type=float, + help="Number of sigma for patterson peaksearch threshold [5]", + default=5, + ) return parser -def refine_vector( v, gv, tol=0.25, ncycles=25, precision=1e-6 ): +def refine_vector(v, gv, tol=0.25, ncycles=25, precision=1e-6): """ Refine a (single) real space lattice vector Input @@ -61,46 +69,45 @@ def refine_vector( v, gv, tol=0.25, ncycles=25, precision=1e-6 ): Solve for shifts and iterates reducing tolerance as 1/(ncycles+1) Peaks that change index during refinement are removed - Stops on : + Stops on : no peaks reassigned ncycles runs out |shift| / |vec| < precision """ assert gv.shape[1] == 3 vref = v.copy() - mg = (gv*gv).sum(axis=1) - mold = None - wt = 1./(mg + mg.max()*0.01) + mg = (gv * gv).sum(axis=1) + wt = 1.0 / (mg + mg.max() * 0.01) gvt = gv.T.copy() for i in range(ncycles): - hr = np.dot( vref, gvt ) # npks - hi = np.round( hr ) # npks - diff = hi - hr # npks - m = abs(diff) < tol/(i+1) # keep or not ? - diff = (diff*wt)[m] # select peaks used - grad = (gvt*wt).T[m] - gvt = gvt[:,m] + hr = np.dot(vref, gvt) # npks + hi = np.round(hr) # npks + diff = hi - hr # npks + m = abs(diff) < tol / (i + 1) # keep or not ? + diff = (diff * wt)[m] # select peaks used + grad = (gvt * wt).T[m] + gvt = gvt[:, m] wt = wt[m] # lsq problem: - rhs = np.dot( grad.T, diff ) - mat = np.dot( grad.T, grad ) + rhs = np.dot(grad.T, diff) + mat = np.dot(grad.T, grad) # avoid problems if singular: - U,s,V = np.linalg.svd( mat ) - one_over_s = np.where( s/s.max() < 1e-6, 1, 1./s ) - mati = np.dot( U, np.dot(np.diag( one_over_s ), V ) ) - vshft = np.dot( mati, rhs ) + U, s, V = np.linalg.svd(mat) + one_over_s = np.where(s / s.max() < 1e-6, 1, 1.0 / s) + mati = np.dot(U, np.dot(np.diag(one_over_s), V)) + vshft = np.dot(mati, rhs) vref = vref + vshft - if m.sum()==0: + if m.sum() == 0: break slen = np.sqrt((vshft * vshft).sum()) - vlen = np.sqrt((vref*vref).sum()) - if vlen < precision or abs(slen/vlen) < precision: + vlen = np.sqrt((vref * vref).sum()) + if vlen < precision or abs(slen / vlen) < precision: break return vref - + class grid: - def __init__(self, npx = 128, mr = 1.0 ,nsig = 5): + def __init__(self, npx=128, mr=1.0, nsig=5): """ Set up the grid to use (the large unit cell) npx - number of points in the grid @@ -108,10 +115,10 @@ def __init__(self, npx = 128, mr = 1.0 ,nsig = 5): """ self.npx = npx self.nsig = nsig - self.grid = np.zeros((npx,npx,npx),np.float32) - self.old_grid = np.zeros((npx,npx,npx),np.float32) - self.cell_size = npx * mr / 2. 
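The cell_size rule encodes the resolution limit: g-vectors are later scaled by cell_size, and the longest one the grid must hold (length 1/max_res) has to land within half the grid. A small worked check, using the default option values as assumptions:

npx, mr = 128, 1.0          # --ngrid and --max_res defaults
cell_size = npx * mr / 2.0  # the 64-unit FFT "big cell"
g_max = 1.0 / mr            # longest g-vector to be gridded
assert cell_size * g_max == npx / 2.0  # exactly the half-grid (Nyquist) edge
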
- logging.info("Using an FFT unit cell of %s"%(str(self.cell_size))) + self.grid = np.zeros((npx, npx, npx), np.float32) + self.old_grid = np.zeros((npx, npx, npx), np.float32) + self.cell_size = npx * mr / 2.0 + logging.info("Using an FFT unit cell of %s" % (str(self.cell_size))) def gv_to_grid_new(self, gv): """ @@ -122,12 +129,12 @@ def gv_to_grid_new(self, gv): logging.info("Gridding data") self.gv = gv hrkrlr = self.cell_size * gv - hkl = np.round( hrkrlr ).astype(int) + hkl = np.round(hrkrlr).astype(int) # Filter to have the peaks in asym unit # ... do we need to do this? What about wrapping? - hmx = hkl.max(axis=1) < (self.npx/2. - 2.) - hmn = hkl.min(axis=1) > (2. - self.npx/2.) - my_g = np.compress( hmx & hmn, gv, axis=0 ) + hmx = hkl.max(axis=1) < (self.npx / 2.0 - 2.0) + hmn = hkl.min(axis=1) > (2.0 - self.npx / 2.0) + my_g = np.compress(hmx & hmn, gv, axis=0) # Compute hkl indices in the fft unit cell using filtered peaks hrkrlr = self.cell_size * my_g # Integer part of hkl (eg 1.0 from 1.9) @@ -137,140 +144,143 @@ def gv_to_grid_new(self, gv): grid = self.grid start = time.time() # Loop over corners with respect to floor corner - ng = grid.shape[0]*grid.shape[1]*grid.shape[2] - flatgrid = grid.reshape( ng ) - for cor in [ (0,0,0), #1 - (1,0,0), #2 - (0,1,0), #3 - (0,0,1), #4 - (1,1,0), #5 - (1,0,1), #6 - (0,1,1), #7 - (1,1,1) ]: #8 + ng = grid.shape[0] * grid.shape[1] * grid.shape[2] + flatgrid = grid.reshape(ng) + for cor in [ + (0, 0, 0), # 1 + (1, 0, 0), # 2 + (0, 1, 0), # 3 + (0, 0, 1), # 4 + (1, 1, 0), # 5 + (1, 0, 1), # 6 + (0, 1, 1), # 7 + (1, 1, 1), + ]: # 8 # The corner thkl = hkl + cor fac = 1 - abs(remain - cor) - vol = abs(fac[:,0]*fac[:,1]*fac[:,2]).astype(np.float32) + vol = abs(fac[:, 0] * fac[:, 1] * fac[:, 2]).astype(np.float32) thkl = np.where(thkl < 0, self.npx + thkl, thkl) - ind = thkl[:,0]*grid.shape[1]*grid.shape[2] + \ - thkl[:,1]*grid.shape[1] + \ - thkl[:,2] - - cImageD11.put_incr( flatgrid , ind.astype(np.intp), vol ) - logging.info("Grid filling loop takes "+str(time.time()-start)+" /s") - + ind = ( + thkl[:, 0] * grid.shape[1] * grid.shape[2] + + thkl[:, 1] * grid.shape[1] + + thkl[:, 2] + ) + cImageD11.put_incr(flatgrid, ind.astype(np.intp), vol) + logging.info("Grid filling loop takes " + str(time.time() - start) + " /s") def fft(self): - """ Compute the Patterson """ + """Compute the Patterson""" start = time.time() - self.patty = np.ascontiguousarray(abs(np.fft.fftn(self.grid)), - np.float32) - logging.info("Time for fft "+str(time.time()-start)) - self.origin = self.patty[0,0,0] - logging.info("Patterson origin height is :"+str(self.origin)) + self.patty = np.ascontiguousarray(abs(np.fft.fftn(self.grid)), np.float32) + logging.info("Time for fft " + str(time.time() - start)) + self.origin = self.patty[0, 0, 0] + logging.info("Patterson origin height is :" + str(self.origin)) def props(self): - """ Print some properties of the Patterson """ - logging.info("Patterson info "+str(self.patty.shape) - +str(type(self.patty))) + """Print some properties of the Patterson""" + logging.info("Patterson info " + str(self.patty.shape) + str(type(self.patty))) p = np.ravel(self.patty) m = np.mean(p) - logging.info("Average: %f"%(m)) - p2 = p*p + logging.info("Average: %f" % (m)) + p2 = p * p self.mean = m - v = np.sqrt( (np.sum(p2) - m*m*len(p) ) /(len(p)-1) ) - logging.info("Sigma: %f"%(v)) + v = np.sqrt((np.sum(p2) - m * m * len(p)) / (len(p) - 1)) + logging.info("Sigma: %f" % (v)) self.sigma = v def peaksearch(self, peaksfile): - """ Peaksearch in the 
Patterson """ - lio = labelimage.labelimage( (self.npx, self.npx), - peaksfile ) - logging.info("Peaksearching at %f sigma"%(self.nsig)) - thresh = self.mean + self.nsig * self.sigma + """Peaksearch in the Patterson""" + lio = labelimage.labelimage((self.npx, self.npx), peaksfile) + logging.info("Peaksearching at %f sigma" % (self.nsig)) + thresh = self.mean + self.nsig * self.sigma for i in range(self.npx): - lio.peaksearch(self.patty[i], - thresh, - i) + lio.peaksearch(self.patty[i], thresh, i) lio.mergelast() lio.finalise() def pv(self, v): - """ print vector """ - return ("%8.4f "*3)%tuple(v) + """print vector""" + return ("%8.4f " * 3) % tuple(v) def reduce(self, vecs): raise Exception("You want lattice_reduction instead") def read_peaks(self, peaksfile): - """ Read in the peaks from a peaksearch """ + """Read in the peaks from a peaksearch""" start = time.time() colf = columnfile.columnfile(peaksfile) - logging.info("reading file %f/s"%(time.time()-start)) + logging.info("reading file %f/s" % (time.time() - start)) # hmm - is this the right way around? - self.rlgrid = 1.0*self.cell_size/self.npx + self.rlgrid = 1.0 * self.cell_size / self.npx self.px = colf.omega - self.px = np.where(self.px > self.npx/2 , - self.px - self.npx , - self.px)*self.rlgrid + self.px = ( + np.where(self.px > self.npx / 2, self.px - self.npx, self.px) * self.rlgrid + ) self.py = colf.sc - self.py = np.where(self.py > self.npx/2 , - self.py - self.npx , - self.py)*self.rlgrid + self.py = ( + np.where(self.py > self.npx / 2, self.py - self.npx, self.py) * self.rlgrid + ) self.pz = colf.fc - self.pz = np.where(self.pz > self.npx/2 , - self.pz - self.npx , - self.pz)*self.rlgrid - self.UBIALL = np.array( [self.px, self.py, self.pz] ).T - logging.info("Number of peaks found %d %f/s, now fit some"%( - self.px.shape[0],time.time()-start)) + self.pz = ( + np.where(self.pz > self.npx / 2, self.pz - self.npx, self.pz) * self.rlgrid + ) + self.UBIALL = np.array([self.px, self.py, self.pz]).T + logging.info( + "Number of peaks found %d %f/s, now fit some" + % (self.px.shape[0], time.time() - start) + ) for i in range(colf.nrows): - print(".",end="") - self.UBIALL[i] = refine_vector( self.UBIALL[i], self.gv ) - - logging.info("Fitting vectors %f /s"%(time.time()-start)) + print(".", end="") + self.UBIALL[i] = refine_vector(self.UBIALL[i], self.gv) + + logging.info("Fitting vectors %f /s" % (time.time() - start)) self.colfile = colf - + def slow_score(self): logging.info("running slow_score") import time + start = time.time() - scores = np.dot( self.UBIALL, np.transpose( self.gv ) ) - scores_int = np.floor( scores + 0.5).astype(int) + scores = np.dot(self.UBIALL, np.transpose(self.gv)) + scores_int = np.floor(scores + 0.5).astype(int) diff = scores - scores_int nv = len(self.UBIALL) - print("scoring",nv,time.time()-start) + print("scoring", nv, time.time() - start) self.tol = 0.1 - scores = np.sqrt(np.average(diff*diff, axis = 1)) - n_ind = np.where(np.absolute(diff)< self.tol, 1 , 0) + scores = np.sqrt(np.average(diff * diff, axis=1)) + n_ind = np.where(np.absolute(diff) < self.tol, 1, 0) nind = np.sum(n_ind, axis=1) order = np.argsort(nind)[::-1] - mag_v = np.sqrt( self.px * self.px + - self.py * self.py + - self.pz * self.pz ) - f = open("fft.pks","w") + mag_v = np.sqrt(self.px * self.px + self.py * self.py + self.pz * self.pz) + f = open("fft.pks", "w") for i in range(nv): j = order[i] - f.write("%d %9.5f %9.5f %9.5f %9.5f %9.5f %9.5f %7d "%( - i, - self.px[j], - self.py[j], - self.pz[j], - mag_v[j], - scores[j], - 
self.colfile.sum_intensity[j], - nind[j] - )) - for k in range(i+1,nv): + f.write( + "%d %9.5f %9.5f %9.5f %9.5f %9.5f %9.5f %7d " + % ( + i, + self.px[j], + self.py[j], + self.pz[j], + mag_v[j], + scores[j], + self.colfile.sum_intensity[j], + nind[j], + ) + ) + for k in range(i + 1, nv): l = order[k] - nij = np.sum( n_ind[j] * n_ind[l] ) - f.write("%4d : %-7d "%(k,nij)) + nij = np.sum(n_ind[j] * n_ind[l]) + f.write("%4d : %-7d " % (k, nij)) f.write("\n") f.close() print(diff.shape) return diff + + ################################################### ## sum_sq_x = 0 ## sum_sq_y = 0 @@ -293,25 +303,21 @@ def slow_score(self): ################################################### - - - - - # sm = np.zeros( (len(diff), len(diff)), float) - # for k in range(len(diff)): - # i = order[k] - # sm[i,i] = np.dot(diff[i], diff[i]) - # for k in range(len(diff)-1): - # i = order[k] - # for l in range(i+1, len(diff)): - # j = order[l] - # sm[i,j] = np.dot(diff[i],diff[j])/sm[i,i]/sm[j,j] - # sm[j,i] = sm[i,j] - # for i in range(len(diff)): - # sm[i,i] = 1. - # print(sm[:5,:5]) - # print("Scoring takes",time.time()-start) - # return sm +# sm = np.zeros( (len(diff), len(diff)), float) +# for k in range(len(diff)): +# i = order[k] +# sm[i,i] = np.dot(diff[i], diff[i]) +# for k in range(len(diff)-1): +# i = order[k] +# for l in range(i+1, len(diff)): +# j = order[l] +# sm[i,j] = np.dot(diff[i],diff[j])/sm[i,i]/sm[j,j] +# sm[j,i] = sm[i,j] +# for i in range(len(diff)): +# sm[i,i] = 1. +# print(sm[:5,:5]) +# print("Scoring takes",time.time()-start) +# return sm ##### # To Do @@ -325,47 +331,46 @@ def slow_score(self): # ngvecs * npeaks == 175 * 200,000 K - 10^7 = 10Mpixel image ? # + def test(options): gvfile = options.gvfile mr = options.max_res npx = options.npx nsig = options.nsig from ImageD11 import indexing + print(npx, mr, nsig) - go = grid(npx = npx , mr = mr , nsig = nsig) + go = grid(npx=npx, mr=mr, nsig=nsig) io = indexing.indexer() io.readgvfile(gvfile) - go.gv_to_grid_new(io.gv ) + go.gv_to_grid_new(io.gv) go.fft() go.props() - go.peaksearch(open(gvfile+".patterson_pks","w")) - im = go.read_peaks(gvfile+".patterson_pks") - #print "Before reduction", len(go.UBIALL) - #sys.stdout.flush() - #ubinew = go.reduce(go.UBIALL) - #print "After reduction", len(ubinew) - #go.UBIALL = ubinew + go.peaksearch(open(gvfile + ".patterson_pks", "w")) + # im = go.read_peaks(gvfile + ".patterson_pks") + # print "Before reduction", len(go.UBIALL) + # sys.stdout.flush() + # ubinew = go.reduce(go.UBIALL) + # print "After reduction", len(ubinew) + # go.UBIALL = ubinew go.slow_score() - #from matplotlib.pylab import imshow, show - #imshow(im) - #show() - #return im, go + # from matplotlib.pylab import imshow, show + # imshow(im) + # show() + # return im, go sys.exit() - go.gv_to_grid_old(io.gv) diff = np.ravel(go.grid - go.old_grid) - print("All OK if this is zero",np.add.reduce(diff)) - print("error at",np.argmax(diff),diff.max(),np.argmin(diff),diff.min()) - - + print("All OK if this is zero", np.add.reduce(diff)) + print("error at", np.argmax(diff), diff.max(), np.argmin(diff), diff.min()) - -if __name__=="__main__": +if __name__ == "__main__": import sys + class options: max_res = 0.7 npx = 128 diff --git a/ImageD11/finite_strain.py b/ImageD11/finite_strain.py index 97b468e3..a7e68de3 100644 --- a/ImageD11/finite_strain.py +++ b/ImageD11/finite_strain.py @@ -1,4 +1,3 @@ - from __future__ import print_function # ImageD11 Software for beamline ID11 @@ -18,121 +17,116 @@ # along with this program; if not, write to the Free 
Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -import numpy as np, math +import numpy as np -# helpers +# helpers def e6_to_symm(e): - """ Follows the eps convention from xfab + """Follows the eps convention from xfab eps = [e11, e12, e13, e22, e23, e33] """ e11, e12, e13, e22, e23, e33 = e - return np.array(((e11,e12,e13), - (e12,e22,e23), - (e13,e23,e33))) + return np.array(((e11, e12, e13), (e12, e22, e23), (e13, e23, e33))) + def symm_to_e6(m): - """ Follows the eps convention from xfab + """Follows the eps convention from xfab eps = [e11, e12, e13, e22, e23, e33] """ - return np.array( ( m[0,0], m[0,1], m[0,2], - m[1,1], m[1,2], - m[2,2] ) ) + return np.array((m[0, 0], m[0, 1], m[0, 2], m[1, 1], m[1, 2], m[2, 2])) + -def cell_to_B( cell ): +def cell_to_B(cell): from ImageD11.unitcell import unitcell - B = unitcell( cell ).B + + B = unitcell(cell).B return B -class DeformationGradientTensor( object ): - + +class DeformationGradientTensor(object): def __init__(self, ubi, ub0): """ see docs/DeformationGradientTensor.ipynb F = dot( ubi.T, ub0.T ) F = ui.bi.b0.u0 """ - assert ubi.shape == (3,3) - assert ub0.shape == (3,3) - self.F = np.dot( ubi.T, ub0.T ) + assert ubi.shape == (3, 3) + assert ub0.shape == (3, 3) + self.F = np.dot(ubi.T, ub0.T) self._svd = None self._vrs = None @property def SVD(self): - """ Returns the singular value decomposition of F """ + """Returns the singular value decomposition of F""" if self._svd is None: self._svd = np.linalg.svd(self.F) return self._svd @property def VRS(self): - """ Returns the Polar decomposition of F=V.R=R.S + """Returns the Polar decomposition of F=V.R=R.S with R as a rotation matrix and V, S as symmetric """ if self._vrs is None: - w,sing,vh = self.SVD - R = np.dot( w, vh ) - S = np.dot( vh.T, np.dot( np.diag(sing), vh ) ) - V = np.dot( w , np.dot( np.diag(sing), w.T ) ) - self._vrs = V,R,S + w, sing, vh = self.SVD + R = np.dot(w, vh) + S = np.dot(vh.T, np.dot(np.diag(sing), vh)) + V = np.dot(w, np.dot(np.diag(sing), w.T)) + self._vrs = V, R, S return self._vrs @property def U(self): - """ Returns the Busing and Levy U matrix relating ubi to ub0 """ - v,r,s = self.VRS + """Returns the Busing and Levy U matrix relating ubi to ub0""" + v, r, s = self.VRS return r - + def finite_strain_ref(self, m=0.5): """ Returns the finite strain in the reference co-ordinate system - if the polar decomposition gives : + if the polar decomposition gives : F = V.R = R.S = ui.bi.b0.u0 Ft.F removes ui final orientation effect Em = (S^m - I )/2m """ - m2 = int(round(m*2)) - assert np.allclose( m2 * 0.5, m ) + m2 = int(round(m * 2)) + assert np.allclose(m2 * 0.5, m) if m2 == 0: - u,s,vt = self.SVD # == F - logs = np.diag( np.log( s ) ) - logFFT = np.dot( np.dot( vt.T, logs ), vt ) - Em = logFFT*0.5 - elif (m2 % 2) == 0: # No SVD in this path + u, s, vt = self.SVD # == F + logs = np.diag(np.log(s)) + logFFT = np.dot(np.dot(vt.T, logs), vt) + Em = logFFT * 0.5 + elif (m2 % 2) == 0: # No SVD in this path m = int(round(m)) - Cm = np.linalg.matrix_power( np.dot( self.F.T, self.F ), m ) - Em = (Cm - np.eye(3))/m2 + Cm = np.linalg.matrix_power(np.dot(self.F.T, self.F), m) + Em = (Cm - np.eye(3)) / m2 else: - V,R,S = self.VRS # == F + V, R, S = self.VRS # == F Um = np.linalg.matrix_power(S, m2) - Em = (Um - np.eye(3))/m2 + Em = (Um - np.eye(3)) / m2 return Em - + def finite_strain_lab(self, m=0.5): """ Returns the finite strain in the lab co-ordinate system - if the polar decomposition gives : + if the polar decomposition gives : F = 
V.R = R.S = ui.bi.b0.u0 F.Ft removes u0 initial orientation effect Em = (V^m - I )/2m """ - m2 = int(round(m*2)) - assert np.allclose( m2 * 0.5, m ) + m2 = int(round(m * 2)) + assert np.allclose(m2 * 0.5, m) if m2 == 0: - u,s,vt = self.SVD # == F - logs = np.diag( np.log( s ) ) - logFTF = np.dot( np.dot( u, logs ), u.T ) - em = logFTF*0.5 - elif (m2 % 2) == 0: # No SVD in this path + u, s, vt = self.SVD # == F + logs = np.diag(np.log(s)) + logFTF = np.dot(np.dot(u, logs), u.T) + em = logFTF * 0.5 + elif (m2 % 2) == 0: # No SVD in this path m = int(round(m)) - Bm = np.linalg.matrix_power( np.dot( self.F, self.F.T ), m ) - em = (Bm - np.eye(3))/m2 + Bm = np.linalg.matrix_power(np.dot(self.F, self.F.T), m) + em = (Bm - np.eye(3)) / m2 else: - V,R,S = self.VRS # == F + V, R, S = self.VRS # == F Vm = np.linalg.matrix_power(V, m2) - em = (Vm - np.eye(3))/m2 + em = (Vm - np.eye(3)) / m2 return em - - - - diff --git a/ImageD11/grain.py b/ImageD11/grain.py index b0bbcca0..988e57e2 100644 --- a/ImageD11/grain.py +++ b/ImageD11/grain.py @@ -21,46 +21,42 @@ # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -import numpy as np, math +import numpy as np import ImageD11.indexing, ImageD11.unitcell, ImageD11.finite_strain import xfab.tools # helpers : put these into xfab.tools at some point? def e6_to_symm(e): - """ Follows the eps convention from xfab + """Follows the eps convention from xfab eps = [e11, e12, e13, e22, e23, e33] """ e11, e12, e13, e22, e23, e33 = e - return np.array(((e11,e12,e13), - (e12,e22,e23), - (e13,e23,e33))) + return np.array(((e11, e12, e13), (e12, e22, e23), (e13, e23, e33))) + def symm_to_e6(m): - """ Follows the eps convention from xfab + """Follows the eps convention from xfab eps = [e11, e12, e13, e22, e23, e33] """ - return np.array( ( m[0,0], m[0,1], m[0,2], - m[1,1], m[1,2], - m[2,2] ) ) - + return np.array((m[0, 0], m[0, 1], m[0, 2], m[1, 1], m[1, 2], m[2, 2])) class grain: - def __init__(self,ubi,translation=None, **kwds): + def __init__(self, ubi, translation=None, **kwds): if translation is None: - # If translation has not been read from ubi file make it + # If translation has not been read from ubi file make it # be None to avoid confusion with a grain which is known # to be at [0,0,0] self.translation = None else: - self.translation = np.array(translation,float) + self.translation = np.array(translation, float) self.set_ubi(ubi) def set_ubi(self, ubi): - """ Update the orientation and clear cached values """ - self.ubi = np.array(ubi,float) - assert np.linalg.det(self.ubi) >= 0, 'Left handed axis system!' - self.clear_cache() + """Update the orientation and clear cached values""" + self.ubi = np.array(ubi, float) + assert np.linalg.det(self.ubi) >= 0, "Left handed axis system!" 
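For reference, a minimal usage sketch of this class (assuming ImageD11 and numpy are importable): the rows of ubi are real-space lattice vectors, so the metric-tensor route below recovers the cell parameters of a cubic 4.05 Angstrom cell.

import numpy as np
from ImageD11.grain import grain

g = grain(np.eye(3) * 4.05)  # det > 0, so the handedness assert passes
print(g.unitcell)            # -> approximately [4.05 4.05 4.05 90. 90. 90.]
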
+ self.clear_cache() def clear_cache(self): # We will cache a bunch of things and access them @@ -77,9 +73,9 @@ def clear_cache(self): self._rmt = None self._unitcell = None - @property + @property def UB(self): - """ The UB matrix from Busing and Levy + """The UB matrix from Busing and Levy columns are the reciprocal space lattice vectors """ if self._UB is None: @@ -93,57 +89,57 @@ def ub(self): @property def B(self): if self._B is None: - self._B = ImageD11.unitcell.unitcell( self.unitcell ).B.copy() + self._B = ImageD11.unitcell.unitcell(self.unitcell).B.copy() return self._B.copy() @property def U(self): - """ The orientation matrix (U) from Busing and Levy """ + """The orientation matrix (U) from Busing and Levy""" if self._U is None: # ubi = inv(UB) = inv(B)inv(U) - self._U = np.dot( self.B, self.ubi ).T + self._U = np.dot(self.B, self.ubi).T return self._U.copy() @property def u(self): return self.U - + @property def Rod(self): - """ A Rodriguez vector. + """A Rodriguez vector. Length proportional to angle, direction is axis""" if self._rod is None: - self._rod = xfab.tools.u_to_rod( self.U ) + self._rod = xfab.tools.u_to_rod(self.U) return self._rod.copy() @property def mt(self): - """Metric tensor """ + """Metric tensor""" if self._mt is None: - self._mt = np.dot( self.ubi, self.ubi.T ) + self._mt = np.dot(self.ubi, self.ubi.T) return self._mt.copy() @property def rmt(self): - """ Reciprocal metric tensor """ + """Reciprocal metric tensor""" if self._rmt is None: - self._rmt = np.linalg.inv( self.mt ) + self._rmt = np.linalg.inv(self.mt) return self._rmt.copy() - + @property def unitcell(self): - """ a,b,c,alpha,beta,gamma """ + """a,b,c,alpha,beta,gamma""" if self._unitcell is None: G = self.mt - a, b, c = np.sqrt( np.diag( G ) ) - al = np.degrees( np.arccos( G[1,2]/b/c ) ) - be = np.degrees( np.arccos( G[0,2]/a/c ) ) - ga = np.degrees( np.arccos( G[0,1]/a/b ) ) - self._unitcell = np.array( (a,b,c,al,be,ga) ) + a, b, c = np.sqrt(np.diag(G)) + al = np.degrees(np.arccos(G[1, 2] / b / c)) + be = np.degrees(np.arccos(G[0, 2] / a / c)) + ga = np.degrees(np.arccos(G[0, 1] / a / b)) + self._unitcell = np.array((a, b, c, al, be, ga)) return self._unitcell.copy() def eps_grain_matrix(self, dzero_cell, m=0.5): - """ dzero_cell can be another grain or cell parameters + """dzero_cell can be another grain or cell parameters [a,b,c,alpha,beta,gamma] m is the exponent for the Seth-Hill finite strain tensors E = 1/2m (U^2m - I) @@ -151,16 +147,16 @@ def eps_grain_matrix(self, dzero_cell, m=0.5): Returns eps as a symmetric matrix ... in the grain reference system of dzero_cell """ - if hasattr( dzero_cell, "UB" ): + if hasattr(dzero_cell, "UB"): B = dzero_cell.UB else: - B = ImageD11.unitcell.unitcell( dzero_cell ).B - F = ImageD11.finite_strain.DeformationGradientTensor( self.ubi, B ) - eps = F.finite_strain_ref( m ) + B = ImageD11.unitcell.unitcell(dzero_cell).B + F = ImageD11.finite_strain.DeformationGradientTensor(self.ubi, B) + eps = F.finite_strain_ref(m) return eps def eps_grain(self, dzero_cell, m=0.5): - """ dzero_cell can be another grain or cell parameters + """dzero_cell can be another grain or cell parameters [a,b,c,alpha,beta,gamma] m is the exponent for the Seth-Hill finite strain tensors E = 1/2m (U^2m - I) @@ -169,12 +165,11 @@ def eps_grain(self, dzero_cell, m=0.5): e11 e12 e13 e22 e23 e33 ... 
in the grain reference system of dzero_cell """ - E = self.eps_grain_matrix( dzero_cell, m ) - return symm_to_e6( E ) - + E = self.eps_grain_matrix(dzero_cell, m) + return symm_to_e6(E) def eps_sample_matrix(self, dzero_cell, m=0.5): - """ dzero_cell can be another grain or cell parameters: + """dzero_cell can be another grain or cell parameters: [a,b,c,alpha,beta,gamma] m is the exponent for the Seth-Hill finite strain tensors E = 1/2m (V^2m - I) @@ -182,17 +177,16 @@ def eps_sample_matrix(self, dzero_cell, m=0.5): Returns eps as a symmetric matrix ... in the sample system (z up, x along the beam at omega=0) """ - if hasattr( dzero_cell, "UB" ): + if hasattr(dzero_cell, "UB"): B = dzero_cell.UB else: - B = ImageD11.unitcell.unitcell( dzero_cell ).B - F = ImageD11.finite_strain.DeformationGradientTensor( self.ubi, B ) - eps = F.finite_strain_lab( m ) + B = ImageD11.unitcell.unitcell(dzero_cell).B + F = ImageD11.finite_strain.DeformationGradientTensor(self.ubi, B) + eps = F.finite_strain_lab(m) return eps - def eps_sample(self, dzero_cell, m=0.5): - """ dzero_cell can be another grain or cell parameters: + """dzero_cell can be another grain or cell parameters: [a,b,c,alpha,beta,gamma] m is the exponent for the Seth-Hill finite strain tensors E = 1/2m (V^2m - I) @@ -201,66 +195,70 @@ def eps_sample(self, dzero_cell, m=0.5): e11 e12 e13 e22 e23 e33 ... in the sample system (z up, x along the beam at omega=0) """ - E = self.eps_sample_matrix( dzero_cell, m ) - return symm_to_e6( E ) - - + E = self.eps_sample_matrix(dzero_cell, m) + return symm_to_e6(E) - def write_grain_file(filename, list_of_grains): f = open(filename, "w") for g in list_of_grains: t = g.translation - f.write("#translation: %g %g %g\n"%(t[0],t[1],t[2])) - if hasattr(g,"name"): - f.write("#name %s\n"%(g.name.rstrip())) - if hasattr(g,"intensity_info"): - f.write("#intensity_info %s\n"%(g.intensity_info.rstrip())) - if hasattr(g,"npks"): - f.write("#npks %d\n"%(int(g.npks))) - if hasattr(g,"nuniq"): - f.write("#nuniq %d\n"%(int(g.nuniq))) - if hasattr(g,"Rod"): + f.write("#translation: %g %g %g\n" % (t[0], t[1], t[2])) + if hasattr(g, "name"): + f.write("#name %s\n" % (g.name.rstrip())) + if hasattr(g, "intensity_info"): + f.write("#intensity_info %s\n" % (g.intensity_info.rstrip())) + if hasattr(g, "npks"): + f.write("#npks %d\n" % (int(g.npks))) + if hasattr(g, "nuniq"): + f.write("#nuniq %d\n" % (int(g.nuniq))) + if hasattr(g, "Rod"): try: - f.write("#Rod %f %f %f\n"%tuple([float(r) for r in g.Rod])) + f.write("#Rod %f %f %f\n" % tuple([float(r) for r in g.Rod])) except: - f.write("#Rod %s"%(g.Rod)) + f.write("#Rod %s" % (g.Rod)) f.write("#UBI:\n") u = g.ubi # More than float32 precision - f.write("%.9g %.9g %.9g\n" %(u[0,0],u[0,1],u[0,2])) - f.write("%.9g %.9g %.9g\n" %(u[1,0],u[1,1],u[1,2])) - f.write("%.9g %.9g %.9g\n\n"%(u[2,0],u[2,1],u[2,2])) + f.write("%.9g %.9g %.9g\n" % (u[0, 0], u[0, 1], u[0, 2])) + f.write("%.9g %.9g %.9g\n" % (u[1, 0], u[1, 1], u[1, 2])) + f.write("%.9g %.9g %.9g\n\n" % (u[2, 0], u[2, 1], u[2, 2])) f.close() + def read_grain_file(filename): - """read ubifile and return a list of ubi arrays """ + """read ubifile and return a list of ubi arrays""" f = open(filename, "r") grainsread = [] u = [] t = None p = {} for line in f: - if line.find("#translation:")==0: - t = [ float(x) for x in line.split()[1:]] + if line.find("#translation:") == 0: + t = [float(x) for x in line.split()[1:]] continue - if line[0] == "#" and line.find("UBI")<0: - k,v=line[1:].split(" ",1) - p[k]=v + if line[0] == "#" and 
line.find("UBI") < 0: + k, v = line[1:].split(" ", 1) + p[k] = v continue - if line[0] == "#" and line.find("intensity_info")>-1: + if line[0] == "#" and line.find("intensity_info") > -1: p["intensity_info"] = line.split("intensity_info")[1].rstrip() - if line.find("#")==0: continue - vals = [ float(x) for x in line.split() ] + if line.find("#") == 0: + continue + vals = [float(x) for x in line.split()] if len(vals) == 3: u = u + [vals] - if len(u)==3: - grainsread.append( grain(u, t) ) - for k in ["name","npks","nuniq","intensity_info"]: # Rod - is recomputed when needed + if len(u) == 3: + grainsread.append(grain(u, t)) + for k in [ + "name", + "npks", + "nuniq", + "intensity_info", + ]: # Rod - is recomputed when needed if k in p: setattr(grainsread[-1], k, p[k]) - p={} + p = {} u = [] t = None f.close() diff --git a/ImageD11/grid_index_parallel.py b/ImageD11/grid_index_parallel.py index 91b57905..533e94d8 100644 --- a/ImageD11/grid_index_parallel.py +++ b/ImageD11/grid_index_parallel.py @@ -1,16 +1,14 @@ - - from __future__ import print_function -from ImageD11 import peakmerge, indexing, transformer, cImageD11 +from ImageD11 import indexing, transformer, cImageD11 from ImageD11 import grain, unitcell, refinegrains, sym_u import xfab.tools -import sys, os, numpy as np, time, random +import sys, os, numpy as np import multiprocessing, traceback from multiprocessing import Pool from multiprocessing import Queue as PQueue if sys.version_info[0] < 3: - import Queue # for exception + import Queue # for exception else: import queue as Queue @@ -20,32 +18,27 @@ nulfile = "/dev/null" - - - - -def domap( pars, - colfile, - grains, - gridpars): +def domap(pars, colfile, grains, gridpars): """ mapping function - does what makemap.py does, but in a function """ - if 'FITPOS' not in gridpars: - gridpars['FITPOS']=True - - OmSlop = gridpars['OMEGAFLOAT'] - OmFloat= OmSlop > 0 + if "FITPOS" not in gridpars: + gridpars["FITPOS"] = True + + OmSlop = gridpars["OMEGAFLOAT"] + OmFloat = OmSlop > 0 # - ss = sys.stdout # turns off printing - if gridpars['NUL']: - NUL = open(nulfile,"w") + ss = sys.stdout # turns off printing + if gridpars["NUL"]: + NUL = open(nulfile, "w") sys.stdout = NUL - for tol in gridpars['TOLSEQ']: - o = refinegrains.refinegrains( OmFloat = OmFloat, OmSlop = OmSlop, - tolerance = tol, - intensity_tth_range = (0,180), - ) + for tol in gridpars["TOLSEQ"]: + o = refinegrains.refinegrains( + OmFloat=OmFloat, + OmSlop=OmSlop, + tolerance=tol, + intensity_tth_range=(0, 180), + ) o.parameterobj = pars # o.loadfiltered ... 
o.scannames = ["internal"] @@ -58,202 +51,214 @@ def domap( pars, o.grainnames.append(i) o.ubisread[name] = g.ubi o.translationsread[name] = g.translation - if gridpars['SYMMETRY'] != "triclinic": - o.makeuniq( gridpars['SYMMETRY'] ) + if gridpars["SYMMETRY"] != "triclinic": + o.makeuniq(gridpars["SYMMETRY"]) o.generate_grains() - if gridpars['FITPOS']: + if gridpars["FITPOS"]: o.refinepositions() else: o.assignlabels() for key in o.grains.keys(): g = o.grains[key] - g.set_ubi( o.refine( g.ubi, quiet=False ) ) + g.set_ubi(o.refine(g.ubi, quiet=False)) # This fills in the uniq for each grain - o.savegrains( nulfile, sort_npks = False) - if 'NUNIQ' in gridpars: - keep = lambda g: g.nuniq > gridpars['NUNIQ'] and g.npks > gridpars['NPKS'] + o.savegrains(nulfile, sort_npks=False) + if "NUNIQ" in gridpars: + + def keep(g): + return g.nuniq > gridpars["NUNIQ"] and g.npks > gridpars["NPKS"] + else: - keep = lambda g: g.npks > gridpars['NPKS'] - gl = [ g for g in o.grains.values() if keep(g) ] + + def keep(g): + return g.npks > gridpars["NPKS"] + + gl = [g for g in o.grains.values() if keep(g)] if len(gl) == 0: break grains = gl - if gridpars['NUL']: + if gridpars["NUL"]: sys.stdout = ss return gl - -def doindex( gve, x, y, z, w, gridpars): +def doindex(gve, x, y, z, w, gridpars): """ Try to index some g-vectors """ - NPKS = gridpars['NPKS'] - UC = gridpars['UC'] - TOLSEQ = gridpars['TOLSEQ'] - COSTOL = gridpars[ 'COSTOL'] - DSTOL = gridpars[ 'DSTOL'] + NPKS = gridpars["NPKS"] + UC = gridpars["UC"] + TOLSEQ = gridpars["TOLSEQ"] + COSTOL = gridpars["COSTOL"] + DSTOL = gridpars["DSTOL"] if "2RFIT" in gridpars: - DOFIT = gridpars[ '2RFIT' ] + DOFIT = gridpars["2RFIT"] else: DOFIT = False - ss = sys.stdout # turns off printing - if gridpars['NUL']: - NUL = open(nulfile,"w") + ss = sys.stdout # turns off printing + if gridpars["NUL"]: + NUL = open(nulfile, "w") sys.stdout = NUL - myindexer = indexing.indexer( - wavelength = w, - unitcell = UC, - gv = gve.T - ) + myindexer = indexing.indexer(wavelength=w, unitcell=UC, gv=gve.T) # added in indexer.__init__ - #myindexer.ds = np.sqrt( (gve * gve).sum(axis=0) ) - #myindexer.ga = np.zeros(len(myindexer.ds),int)-1 # Grain assignments - for ring1 in gridpars['RING1']: - for ring2 in gridpars['RING2']: - myindexer.parameterobj.set_parameters( { - 'cosine_tol' : COSTOL, - 'ds_tol': DSTOL, - 'minpks': NPKS, - 'max_grains': 1000, - 'hkl_tol': TOLSEQ[0], - 'ring_1': ring1, - 'ring_2': ring2 - } ) - myindexer.loadpars( ) - myindexer.assigntorings( ) + # myindexer.ds = np.sqrt( (gve * gve).sum(axis=0) ) + # myindexer.ga = np.zeros(len(myindexer.ds),int)-1 # Grain assignments + for ring1 in gridpars["RING1"]: + for ring2 in gridpars["RING2"]: + myindexer.parameterobj.set_parameters( + { + "cosine_tol": COSTOL, + "ds_tol": DSTOL, + "minpks": NPKS, + "max_grains": 1000, + "hkl_tol": TOLSEQ[0], + "ring_1": ring1, + "ring_2": ring2, + } + ) + myindexer.loadpars() + myindexer.assigntorings() try: - myindexer.find( ) - myindexer.scorethem( fitb4 = DOFIT ) + myindexer.find() + myindexer.scorethem(fitb4=DOFIT) except: pass # filter out crap - vol = 1/np.linalg.det( UC.B ) - grains = [ grain.grain(ubi, [x,y,z]) for ubi in myindexer.ubis - if np.linalg.det(ubi) > vol*0.5 ] - if gridpars['NUL']: + vol = 1 / np.linalg.det(UC.B) + grains = [ + grain.grain(ubi, [x, y, z]) + for ubi in myindexer.ubis + if np.linalg.det(ubi) > vol * 0.5 + ] + if gridpars["NUL"]: sys.stdout = ss return grains -def test_many_points( args ): + +def test_many_points(args): """ Grid index - loop over 
points Places the results in a multiprocessing Queue """ colfile, parameters, translations, gridpars = args - s = "Hello from %s %d"%(multiprocessing.current_process().name ,os.getpid()) - s += " %d to do"%(len(translations)) - s += "%s %s"%(colfile, parameters) + s = "Hello from %s %d" % (multiprocessing.current_process().name, os.getpid()) + s += " %d to do" % (len(translations)) + s += "%s %s" % (colfile, parameters) print(s) mytransformer = transformer.transformer() - mytransformer.loadfiltered( colfile ) - mytransformer.loadfileparameters( parameters ) - w = mytransformer.parameterobj.get("wavelength") - first=True - ni = len(translations)/100.0 - for i,(t_x, t_y, t_z) in enumerate(translations): - mytransformer.updateparameters( ) - mytransformer.parameterobj.set_parameters( { - 't_x':t_x, 't_y':t_y, 't_z':t_z - } ) - mytransformer.compute_tth_eta( ) - mytransformer.computegv( ) + mytransformer.loadfiltered(colfile) + mytransformer.loadfileparameters(parameters) + w = mytransformer.parameterobj.get("wavelength") + first = True + ni = len(translations) / 100.0 + for i, (t_x, t_y, t_z) in enumerate(translations): + mytransformer.updateparameters() + mytransformer.parameterobj.set_parameters({"t_x": t_x, "t_y": t_y, "t_z": t_z}) + mytransformer.compute_tth_eta() + mytransformer.computegv() # mytransformer.savegv( tmp+".gve" ) - gve = np.vstack(( mytransformer.colfile.gx,mytransformer.colfile.gy,mytransformer.colfile.gz )) + gve = np.vstack( + ( + mytransformer.colfile.gx, + mytransformer.colfile.gy, + mytransformer.colfile.gz, + ) + ) if first: - first=False - grains = doindex(gve, t_x,t_y,t_z, w, gridpars) + first = False + grains = doindex(gve, t_x, t_y, t_z, w, gridpars) ng = len(grains) if ng > 0: - grains = domap( mytransformer.parameterobj , - mytransformer.colfile , - grains, - gridpars) + grains = domap( + mytransformer.parameterobj, mytransformer.colfile, grains, gridpars + ) nk = len(grains) if len(grains) > 0: - test_many_points.q.put( grains, False ) # do not wait + test_many_points.q.put(grains, False) # do not wait else: nk = 0 - sys.stderr.write(" % 6.2f%% Position %d %d %d"%(i/ni,t_x, t_y, t_z)+ - " grains found %d kept %d\n"%(ng, nk)) + sys.stderr.write( + " % 6.2f%% Position %d %d %d" % (i / ni, t_x, t_y, t_z) + + " grains found %d kept %d\n" % (ng, nk) + ) + - class uniq_grain_list(object): """ Cope with finding the same grain over and over... """ + def __init__(self, symmetry, toldist, tolangle, grains=None): - self.grp = getattr( sym_u, symmetry )() - self.dt2 = toldist*toldist - self.tar = np.radians(tolangle) + self.grp = getattr(sym_u, symmetry)() + self.dt2 = toldist * toldist + self.tar = np.radians(tolangle) self.uniqgrains = [] if grains is not None: - self.add( grains ) - + self.add(grains) + def add(self, grains): - for i,gnew in enumerate(grains): + for i, gnew in enumerate(grains): newgrain = True for gold in self.uniqgrains: dt = gnew.translation - gold.translation - dt2 =np.dot(dt,dt) + dt2 = np.dot(dt, dt) if dt2 > self.dt2: continue aumis = np.dot(gold.asymusT, gnew.U) - arg = (aumis[:,0,0]+aumis[:,1,1]+aumis[:,2,2] - 1. )/2. 
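The arg expression here is the axis-angle trace identity: for a rotation matrix R, trace(R) = 1 + 2 cos(theta), so the smallest arccos over the symmetry-equivalent products gives the misorientation angle. A self-contained check with a known 10 degree rotation about z:

import numpy as np

theta = np.radians(10.0)
c, s = np.cos(theta), np.sin(theta)
R = np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])
arg = (np.trace(R) - 1.0) / 2.0  # equals cos(theta)
assert np.isclose(np.degrees(np.arccos(np.clip(arg, -1, 1))), 10.0)
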
+ arg = (aumis[:, 0, 0] + aumis[:, 1, 1] + aumis[:, 2, 2] - 1.0) / 2.0 angle = np.arccos(np.clip(arg, -1, 1)).min() if angle < self.tar: # too close in angle and space - print( " matched",i,np.degrees(angle),np.sqrt(dt2) ) + print(" matched", i, np.degrees(angle), np.sqrt(dt2)) gold.nfound += 1 newgrain = False break if newgrain: - self.append_uniq( gnew ) - - def append_uniq( self, g ): + self.append_uniq(gnew) + + def append_uniq(self, g): symubis = [np.dot(o, g.ubi) for o in self.grp.group] - g.asymusT = np.array([xfab.tools.ubi_to_u_b(ubi)[0].T for ubi in symubis]) + g.asymusT = np.array([xfab.tools.ubi_to_u_b(ubi)[0].T for ubi in symubis]) g.nfound = 1 - self.uniqgrains.append( g ) + self.uniqgrains.append(g) - -def initgrid( fltfile, parfile, tmp, gridpars ): + +def initgrid(fltfile, parfile, tmp, gridpars): """ Sets up a grid indexing by preparing the unitcell for indexing and checking the columns we want are in the colfile """ mytransformer = transformer.transformer() - mytransformer.loadfiltered( fltfile ) - mytransformer.loadfileparameters( parfile ) - gridpars[ 'UC' ] = unitcell.unitcell_from_parameters( mytransformer.parameterobj ) + mytransformer.loadfiltered(fltfile) + mytransformer.loadfileparameters(parfile) + gridpars["UC"] = unitcell.unitcell_from_parameters(mytransformer.parameterobj) col = mytransformer.colfile - if not "drlv2" in col.titles: - col.addcolumn( np.ones(col.nrows, float), - "drlv2" ) - if not "labels" in col.titles: - col.addcolumn( np.ones(col.nrows, float)-2, - "labels" ) - if not "sc" in col.titles: + if "drlv2" not in col.titles: + col.addcolumn(np.ones(col.nrows, float), "drlv2") + if "labels" in col.titles: + col.addcolumn(np.ones(col.nrows, float) - 2, "labels") + if "sc" not in col.titles: assert "xc" in col.titles - col.addcolumn( col.xc.copy(), "sc") - if not "fc" in col.titles: + col.addcolumn(col.xc.copy(), "sc") + if "fc" not in col.titles: assert "yc" in col.titles - col.addcolumn( col.yc.copy(), "fc") - mytransformer.colfile.writefile( "%s.flt"%(tmp)) + col.addcolumn(col.yc.copy(), "fc") + mytransformer.colfile.writefile("%s.flt" % (tmp)) return gridpars # Debugging multiprocessing - print the exceptions def wrap_test_many_points(x): try: - test_many_points( x ) + test_many_points(x) except Exception as e: - print('Caught exception in worker thread') + print("Caught exception in worker thread") # This prints the type, value, and stack trace of the # current exception being handled. traceback.print_exc() raise e + def wrap_test_many_points_init(q): """ This passes the q to the function @@ -263,7 +268,7 @@ def wrap_test_many_points_init(q): test_many_points.q = q -def grid_index_parallel( fltfile, parfile, tmp, gridpars, translations ): +def grid_index_parallel(fltfile, parfile, tmp, gridpars, translations): """ fltfile containing peaks parfile containing instrument geometry and unit cell @@ -275,26 +280,26 @@ def grid_index_parallel( fltfile, parfile, tmp, gridpars, translations ): splits workload over processes (blocks of translations to each process) This thread should catch results via a queue """ - gridpars = initgrid( fltfile, parfile, tmp, gridpars ) - print( "Done init" ) - if 'NPROC' not in gridpars or gridpars['NPROC'] is None: - NPR = multiprocessing.cpu_count() - 1 - cImageD11.cimaged11_omp_set_num_threads(2) # assume hyperthreading is useful? 
+ gridpars = initgrid(fltfile, parfile, tmp, gridpars) + print("Done init") + if "NPROC" not in gridpars or gridpars["NPROC"] is None: + NPR = multiprocessing.cpu_count() - 1 + cImageD11.cimaged11_omp_set_num_threads(2) # assume hyperthreading is useful? else: - NPR = int(gridpars['NPROC']) - if 'NTHREAD' in gridpars: - cImageD11.cimaged11_omp_set_num_threads(int(gridpars['NTHREAD'])) + NPR = int(gridpars["NPROC"]) + if "NTHREAD" in gridpars: + cImageD11.cimaged11_omp_set_num_threads(int(gridpars["NTHREAD"])) elif NPR > 1: cImageD11.cimaged11_omp_set_num_threads(1) - tsplit = [ translations[i::NPR] for i in range(NPR) ] - args = [("%s.flt"%(tmp), parfile, t, gridpars) for i,t in enumerate(tsplit) ] + tsplit = [translations[i::NPR] for i in range(NPR)] + args = [("%s.flt" % (tmp), parfile, t, gridpars) for i, t in enumerate(tsplit)] q = PQueue() p = Pool(processes=NPR, initializer=wrap_test_many_points_init, initargs=[q]) - print( "Using a pool of",NPR,"processes" ) - pa = p.map_async( wrap_test_many_points, args ) - ul = uniq_grain_list( gridpars['SYMMETRY'], - gridpars['toldist'], - gridpars['tolangle'] ) + print("Using a pool of", NPR, "processes") + pa = p.map_async(wrap_test_many_points, args) + ul = uniq_grain_list( + gridpars["SYMMETRY"], gridpars["toldist"], gridpars["tolangle"] + ) lastsave = 0 while True: @@ -302,12 +307,12 @@ def grid_index_parallel( fltfile, parfile, tmp, gridpars, translations ): try: grs = q.get(True, 10) gb4 = len(ul.uniqgrains) - ul.add( grs ) - gnow = len(ul.uniqgrains) - print( "Got % 5d new %d from %d"%(gnow, gnow-gb4, len(grs) ) ) + ul.add(grs) + gnow = len(ul.uniqgrains) + print("Got % 5d new %d from %d" % (gnow, gnow - gb4, len(grs))) if len(ul.uniqgrains) > lastsave: - lastsave = len( ul.uniqgrains ) - grain.write_grain_file( "all"+tmp+".map", ul.uniqgrains ) + lastsave = len(ul.uniqgrains) + grain.write_grain_file("all" + tmp + ".map", ul.uniqgrains) if pa._number_left == 0: break except Queue.Empty: @@ -316,15 +321,16 @@ def grid_index_parallel( fltfile, parfile, tmp, gridpars, translations ): break except KeyboardInterrupt: break - # write here to be on the safe side .... - grain.write_grain_file( "all"+tmp+".map", ul.uniqgrains ) + # write here to be on the safe side .... 
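# --- Illustrative aside (not part of the patch): the split above,
# tsplit = [translations[i::NPR] for i in range(NPR)], deals the grid
# positions out round-robin, so each process samples the whole grid rather
# than one corner of it:
jobs = list(range(10))
nproc = 3
chunks = [jobs[i::nproc] for i in range(nproc)]
print(chunks)  # [[0, 3, 6, 9], [1, 4, 7], [2, 5, 8]]
assert sorted(sum(chunks, [])) == jobs  # nothing lost, nothing duplicated
# ---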
+ grain.write_grain_file("all" + tmp + ".map", ul.uniqgrains) p.close() p.join() -if __name__=="__main__": +if __name__ == "__main__": print("#Here is an example script") - print(""" + print( + """ import sys, random from ImageD11.grid_index_parallel import grid_index_parallel @@ -362,4 +368,5 @@ def grid_index_parallel( fltfile, parfile, tmp, gridpars, translations ): parfile = sys.argv[2] tmp = sys.argv[3] grid_index_parallel( fltfile, parfile, tmp, gridpars, translations ) -""") +""" + ) diff --git a/ImageD11/guicommand.py b/ImageD11/guicommand.py index 11ebb820..f69ab5b1 100644 --- a/ImageD11/guicommand.py +++ b/ImageD11/guicommand.py @@ -1,4 +1,3 @@ - from __future__ import print_function # ImageD11_v0.4 Software for beamline ID11 @@ -35,22 +34,25 @@ # To autoconvert arrays to lists for Java XMLRPC RETURN_NUMERICS = False import numpy as np + TYPE_NUMERIC = type(np.zeros(1)) + class guicommand: """ Keeps a log of all commands issued - separates gui code from algorithmic code """ + def __init__(self): - self.objects = { "peakmerger" : peakmerge.peakmerger(), - "transformer": transformer.transformer(), - "indexer" : indexing.indexer(), - "solver" : eps_sig_solver.solver(), - } - - self.commandscript = \ -"""# Create objects to manipulate - they hold your data + self.objects = { + "peakmerger": peakmerge.peakmerger(), + "transformer": transformer.transformer(), + "indexer": indexing.indexer(), + "solver": eps_sig_solver.solver(), + } + + self.commandscript = """# Create objects to manipulate - they hold your data # from ImageD11 import peakmerge, indexing, transformer, eps_sig_solver mypeakmerger = peakmerge.peakmerger() @@ -77,34 +79,47 @@ def execute(self, obj, command, *args, **kwds): if obj not in list(self.objects.keys()): raise Exception("ERROR! Unknown command object") o = self.objects[obj] - ran = "my%s.%s("% (obj, command) + ran = "my%s.%s(" % (obj, command) if command.find("."): subobjs = command.split(".")[:-1] for s in subobjs: - o = getattr(o,s) + o = getattr(o, s) command = command.split(".")[-1] - func = getattr(o,command) + func = getattr(o, command) try: addedcomma = "" for a in args: - ran="%s %s %s"%(ran,addedcomma,repr(a)) - addedcomma="," - for k,v in list(kwds.items()): - ran="%s %s %s=%s "%(ran,addedcomma,k,v) - addedcomma="," - ran+=" )\n" - logging.debug("Running: "+ran) + ran = "%s %s %s" % (ran, addedcomma, repr(a)) + addedcomma = "," + for k, v in list(kwds.items()): + ran = "%s %s %s=%s " % (ran, addedcomma, k, v) + addedcomma = "," + ran += " )\n" + logging.debug("Running: " + ran) sys.stdout.flush() ret = func(*args, **kwds) except: - logging.error("Exception occurred " + "self" + str(self) + - "obj" + str(obj)+ "command" + str(command) + - "func" + str(func) + "args" + str(args) + "kwds" + str(kwds)) + logging.error( + "Exception occurred " + + "self" + + str(self) + + "obj" + + str(obj) + + "command" + + str(command) + + "func" + + str(func) + + "args" + + str(args) + + "kwds" + + str(kwds) + ) import traceback + traceback.print_exc() return "Exception occurred in the python " + ran - self.commandscript+=ran + self.commandscript += ran return ret def getdata(self, obj, name): @@ -118,7 +133,7 @@ def getdata(self, obj, name): """ if obj not in list(self.objects.keys()): raise Exception("ERROR! 
Unknown command object") - attribute = getattr(self.objects[obj],name) + attribute = getattr(self.objects[obj], name) if RETURN_NUMERICS: # Normally python will get this logging.debug("python return array") diff --git a/ImageD11/gv_general.py b/ImageD11/gv_general.py index afff26ff..7a8cdade 100644 --- a/ImageD11/gv_general.py +++ b/ImageD11/gv_general.py @@ -1,12 +1,9 @@ - from __future__ import print_function ## Automatically adapted for numpy.oldnumeric Sep 06, 2007 by alter_code1.py - - # ImageD11 Software for beamline ID11 # Copyright (C) 2005-2007 Jon Wright # @@ -34,8 +31,10 @@ # Use python -v myscript.py args # print("gv_general from ",__file__) + def angmod(a): - return np.arctan2( np.sin(a), np.cos(a) ) + return np.arctan2(np.sin(a), np.cos(a)) + class rotation_axis: """ @@ -45,32 +44,32 @@ class rotation_axis: potentially can add quaternions and euler etc later? """ - def __init__(self, direction, angle = 0.0): + + def __init__(self, direction, angle=0.0): """ direction is the rotation direction angle is the angle of rotation (degrees) """ self.direction = np.asarray(direction) - assert self.direction.shape == (3,) , \ - "direction.shape != 3, is it a vector??" + assert self.direction.shape == (3,), "direction.shape != 3, is it a vector??" mag = np.dot(self.direction, self.direction) - if abs(mag - 1.0) > 1e-5 : + if abs(mag - 1.0) > 1e-5: self.direction = self.direction / mag - logging.warning("Non-normalised direction vector "+str(direction)) + logging.warning("Non-normalised direction vector " + str(direction)) self.angle = angle self.matrix = self.to_matrix() - def rotate_vectors(self, vectors, angles = None): + def rotate_vectors(self, vectors, angles=None): """ Given a list of vectors, rotate them to new ones Angle is either self.angle, or 1 per vector, in degrees - + http://www.ecse.rpi.edu/Homepages/wrf/Research/Short_Notes/rotation.html p' = a . p a + (p - a . p a) cos q + a * p sin q - = p cos q + a . p a (1-cos q) + a * p sin q - point p, axis a, angle q - - + = p cos q + a . 
p a (1-cos q) + a * p sin q + point p, axis a, angle q + + http://mathworld.wolfram.com/RotationFormula.html r' = r cos(t) + n(n.r)(1-cos(t)) + rxn sin(t) """ @@ -83,14 +82,13 @@ def rotate_vectors(self, vectors, angles = None): assert q.shape[0] == p.shape[1] rp = p * np.cos(q) assert rp.shape == p.shape - a_dot_p = np.dot( self.direction, p) + a_dot_p = np.dot(self.direction, p) apa = np.outer(self.direction, a_dot_p) - rp += apa*(1-np.cos(q)) - rp += np.sin(q)*np.transpose( np.cross( - self.direction, np.transpose(p)) ) + rp += apa * (1 - np.cos(q)) + rp += np.sin(q) * np.transpose(np.cross(self.direction, np.transpose(p))) return rp - - def rotate_vectors_inverse(self, vectors, angles = None): + + def rotate_vectors_inverse(self, vectors, angles=None): """ Same as rotate vectors, but opposing sense """ @@ -108,109 +106,115 @@ def to_matrix(self): Returns a 3x3 rotation matrix R = I3cos(t) + ww^T (1-cos(t)) - W sin(t) t = angle - W = vector with hat operator = [ 0 -wz wy + W = vector with hat operator = [ 0 -wz wy wz 0 -wx -wy wx 0 ] - """ + """ # FIXME - check for caching dx, dy, dz = self.direction - e = np.array([self.direction]*3) - w = np.transpose(np.array( [ [ 0, -dz, dy ] , - [dz, 0, -dx] , - [-dy, dx, 0] ], float)) - st = math.sin( math.radians( self.angle )) - ct = math.cos( math.radians( self.angle )) - self.matrix = np.identity(3, float)*ct - st * w + \ - (1 - ct)*e*np.transpose(e) + e = np.array([self.direction] * 3) + w = np.transpose(np.array([[0, -dz, dy], [dz, 0, -dx], [-dy, dx, 0]], float)) + st = math.sin(math.radians(self.angle)) + ct = math.cos(math.radians(self.angle)) + self.matrix = ( + np.identity(3, float) * ct - st * w + (1 - ct) * e * np.transpose(e) + ) self.inversematrix = inv(self.matrix) return self.matrix -def axis_from_matrix( m ): + +def axis_from_matrix(m): """ - Factory method to create a rotation_axis object from a + Factory method to create a rotation_axis object from a direction cosine matrix http://en.wikipedia.org/wiki/Rotation_representation_%28mathematics%29 """ # should check for m being a pure rotation d = det(m) - assert abs(d - 1.0)<1e-6, "pure rotation? det(m) = %f"%(d) - arg = (m[0,0]+m[1,1]+m[2,2]-1.)/2.0 + assert abs(d - 1.0) < 1e-6, "pure rotation? det(m) = %f" % (d) + arg = (m[0, 0] + m[1, 1] + m[2, 2] - 1.0) / 2.0 if arg == 1: # identity matrix - oh bugger - direction is undefined angle_rad = 0.0 - direc = np.array([0,0,1],float) + direc = np.array([0, 0, 1], float) else: - angle_rad = math.acos(arg) - direc = np.array([m[2,1] - m[1,2], - m[0,2] - m[2,0], - m[1,0] - m[0,1] ], float ) + angle_rad = math.acos(arg) + direc = np.array( + [m[2, 1] - m[1, 2], m[0, 2] - m[2, 0], m[1, 0] - m[0, 1]], float + ) direc = direc / np.sqrt(np.dot(direc, direc)) - o = rotation_axis( direc , math.degrees( angle_rad ) ) + o = rotation_axis(direc, math.degrees(angle_rad)) if not (abs(o.matrix - m) < 1e-5).all(): - print("o.matrix\n",o.matrix) - print("m\n",m) + print("o.matrix\n", o.matrix) + print("m\n", m) raise Exception("error in axis_from_matrix") return o -rotate_identity = rotation_axis( np.array([0,0,1],float) , 0.0 ) + +rotate_identity = rotation_axis(np.array([0, 0, 1], float), 0.0) -def k_to_g( k , # scattering vectors in the laboratory - angles , # eg samome in degrees - axis = None, - # eg z axis - pre = None , - post = None ): +def k_to_g( + k, # scattering vectors in the laboratory + angles, # eg samome in degrees + axis=None, + # eg z axis + pre=None, + post=None, +): """ Computes g = pre . rot(axis, angle) . post . 
k Typically in ImageD11 post = [wedge][chi] and pre = identity since g = Omega.Chi.Wedge.k or k = Wedge-1.Chi-1.Omega-1.g - that is to say omega is directly under the sample and + that is to say omega is directly under the sample and wedge is at the bottom of the stack """ if axis is None: - raxis = rotation_axis( [0,0,1] ) + raxis = rotation_axis([0, 0, 1]) else: - raxis = rotation_axis( axis ) - assert k.shape[1] == angles.shape[0] , \ - "Number of vectors and scan axis must be same"+\ - str(k.shape)+str(angles.shape) - assert k.shape[0] == 3 , "k must be a [:,3] array" + raxis = rotation_axis(axis) + assert k.shape[1] == angles.shape[0], ( + "Number of vectors and scan axis must be same" + + str(k.shape) + + str(angles.shape) + ) + assert k.shape[0] == 3, "k must be a [:,3] array" if post is not None: - pk = np.dot(post, k ) - rpk = raxis.rotate_vectors(pk , angles) + pk = np.dot(post, k) + rpk = raxis.rotate_vectors(pk, angles) else: - rpk = raxis.rotate_vectors(k , angles) + rpk = raxis.rotate_vectors(k, angles) if pre is not None: - return np.dot(pre, rpk ) + return np.dot(pre, rpk) else: return rpk - - -def g_to_k( g, # g-vectors [3,:] - wavelength, # Needed - depends on curve of Ewald sphere! - axis = np.array([0,0,1], float), - pre = None, - post = None ): + +def g_to_k( + g, # g-vectors [3,:] + wavelength, # Needed - depends on curve of Ewald sphere! + axis=np.array([0, 0, 1], float), + pre=None, + post=None, +): """ - Get the k and angles given the g in + Get the k and angles given the g in g = pre . rot(axis, angle) . post . k g = omega . chi . wedge . k ...where k will satisfy the Laue equations - + The recipe is: Find the components of g along and perpendicular to the rotation axis - co-ords are a0 = axis, - a1 = axis x g, + co-ords are a0 = axis, + a1 = axis x g, a2 = a1 x axis = ( axis x g ) x axis Rotated vector will be a0 + a1 sin(angle) + a2 cos(angle) Laue condition says that [incident beam].[k] = k.sin(theta) - = 2 sin^2(theta) / lambda + = 2 sin^2(theta) / lambda = sin(t)/d = |g|.sin(t) = |g|*|g|*lambda / 2 - Apply any post rotations to the incident beam + Apply any post rotations to the incident beam Apply any pre-rotations to the g vector |g| = [a0 + a1 sin(angle) + a2 cos(angle) ] . [incident beam] => solve for angle @@ -228,17 +232,17 @@ def g_to_k( g, # g-vectors [3,:] # First deal with the pre and post rotations if pre is not None: # rg = pre.rotate_vectors_inverse(g) - rg = np.dot( pre, g ) + rg = np.dot(pre, g) else: rg = g assert rg.shape == g.shape beam = np.zeros(rg.shape, float) - beam[0,:] = -1./wavelength - beam[1,:] = 0. - beam[2,:] = 0. + beam[0, :] = -1.0 / wavelength + beam[1, :] = 0.0 + beam[2, :] = 0.0 if post is not None: # rb = post.rotate_vectors(beam) - rb = np.dot( post.T, beam ) + rb = np.dot(post.T, beam) else: rb = beam assert rb.shape == g.shape @@ -251,63 +255,59 @@ def g_to_k( g, # g-vectors [3,:] a0 = g - a2 assert a0.shape == a1.shape == a2.shape == g.shape # Dot product with incident beam - rbda0 = np.sum(rb * a0, 0) - rbda1 = np.sum(rb * a1, 0) + rbda0 = np.sum(rb * a0, 0) + rbda1 = np.sum(rb * a1, 0) rbda2 = np.sum(rb * a2, 0) assert rbda0.shape == rbda1.shape == rbda2.shape == (npeaks,) modg = np.sqrt(np.sum(g * g, 0)) - kdotbeam = -modg*modg/2. 
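# --- Illustrative aside (not part of the patch): the solution that follows
# rests on the identity a*sin(x) + b*cos(x) = sqrt(a*a + b*b) * sin(x + p)
# with p = atan2(b, a).  A standalone numeric check of the two solutions of
# a*sin(x) + b*cos(x) = c (the library code folds its own sign bookkeeping
# into phi, so its branches are arranged slightly differently):
import numpy as np

a, b, c = 1.3, -0.7, 0.9  # arbitrary test values with |c| <= sqrt(a*a + b*b)
p = np.arctan2(b, a)
x1 = np.arcsin(c / np.hypot(a, b)) - p
x2 = np.pi - np.arcsin(c / np.hypot(a, b)) - p
for x in (x1, x2):
    assert abs(a * np.sin(x) + b * np.cos(x) - c) < 1e-12
# ---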
+ kdotbeam = -modg * modg / 2.0 # print kdotbeam,"uyou" # k.b = rbda0 + rbda1.sin(t) + rbda2.cos(t) # a = rbda1 # b = rbda2 # c = kdotbeam - rbda0 - # From wikipedia: + # From wikipedia: # http://en.wikipedia.org/wiki/List_of_trigonometric_identities#Linear_combinations # a.sin(x) + b.cos(x) = sqrt(a*a+b*b) sin(x+p) # with p = atan(b/a) - phi = np.arctan2(rbda2,rbda1) - den = np.sqrt(rbda1*rbda1+rbda2*rbda2) - msk = (den <= 0) - quot = (kdotbeam - rbda0)/(den + msk) - valid = (~msk) & ( quot >= -1) & ( quot <= 1) - quot = np.where( valid, quot, 0 ) + phi = np.arctan2(rbda2, rbda1) + den = np.sqrt(rbda1 * rbda1 + rbda2 * rbda2) + msk = den <= 0 + quot = (kdotbeam - rbda0) / (den + msk) + valid = (~msk) & (quot >= -1) & (quot <= 1) + quot = np.where(valid, quot, 0) x_plus_p = np.arcsin(quot) sol1 = x_plus_p + phi sol2 = math.pi - x_plus_p + phi - # k + # k return np.degrees(angmod(sol1)), np.degrees(angmod(sol2)), valid def wedgemat(w): cw = np.cos(np.radians(w)) sw = np.sin(np.radians(w)) - W = np.array([[ cw , 0 , sw ], - [ 0 , 1 , 0 ], - [-sw , 0 , cw ]]) + W = np.array([[cw, 0, sw], [0, 1, 0], [-sw, 0, cw]]) return W + def chimat(c): cc = np.cos(np.radians(c)) sc = np.sin(np.radians(c)) - C = np.array([[ 1 , 0 , 0 ], - [ 0 , cc , sc ], - [ 0 , -sc , cc ]]) + C = np.array([[1, 0, 0], [0, cc, sc], [0, -sc, cc]]) return C - -def wedgechi(wedge=0., chi=0.): + +def wedgechi(wedge=0.0, chi=0.0): """ in transform: g = omega . chi . wedge .k """ - return np.dot( wedgemat(wedge), chimat(chi) ) + return np.dot(wedgemat(wedge), chimat(chi)) + -def chiwedge(chi=0., wedge=0.): +def chiwedge(chi=0.0, wedge=0.0): """ in transform: g = omega . chi . wedge .k """ - return np.dot( chimat(chi), wedgemat(wedge) ) - - + return np.dot(chimat(chi), wedgemat(wedge)) diff --git a/ImageD11/indexer.py b/ImageD11/indexer.py index ced7b7b3..48b82d0f 100644 --- a/ImageD11/indexer.py +++ b/ImageD11/indexer.py @@ -1,4 +1,3 @@ - from __future__ import print_function # ImageD11_v0.4 Software for beamline ID11 @@ -23,39 +22,45 @@ from ImageD11 import grain, transform, cImageD11, indexing, unitcell, refinegrains import scipy.optimize from scipy.spatial.transform import Rotation -import math, time, sys, logging +import sys + def unit(a): - """ Normalise vector """ - return a/np.sqrt(np.dot(a,a)) + """Normalise vector""" + return a / np.sqrt(np.dot(a, a)) -def get_tth( ds, wvln): - """ Convert 1/d to Bragg angle """ - return 2 * np.degrees( np.arcsin( ds * wvln / 2 ) ) + +def get_tth(ds, wvln): + """Convert 1/d to Bragg angle""" + return 2 * np.degrees(np.arcsin(ds * wvln / 2)) class indexer: """ A class for searching for orientation matrices """ - def __init__(self, - transformpars, - colfile, - ): - """ - """ + + def __init__( + self, + transformpars, + colfile, + ): + """ """ self.transformpars = transformpars - self.loadcolfile(colfile) + self.loadcolfile(colfile) self.setcell() self.reset() def setcell(self): - """ Sets the unit cell parameters for indexing """ - cp = [self.transformpars.get("cell_%s"%(s)) for s in - "_a _b _c alpha beta gamma".split()] - self.unitcell = unitcell.unitcell(cp, - self.transformpars.get("cell_lattice_[P,A,B,C,I,F,R]")) - + """Sets the unit cell parameters for indexing""" + cp = [ + self.transformpars.get("cell_%s" % (s)) + for s in "_a _b _c alpha beta gamma".split() + ] + self.unitcell = unitcell.unitcell( + cp, self.transformpars.get("cell_lattice_[P,A,B,C,I,F,R]") + ) + def loadcolfile(self, colfile): self.cf = colfile self.updatecolfile() @@ -66,84 +71,86 @@ def updatecolfile(self): if 
"xl" not in self.cf.titles: if "sc" in self.cf.titles: pks = self.cf.sc, self.cf.fc - elif "xc" in self.cf.titles: + elif "xc" in self.cf.titles: pks = self.cf.xc, self.cf.yc else: raise Exception("peaks file misses xc or sc") - xl,yl,zl = transform.compute_xyz_lab( pks, - **self.transformpars.parameters) - self.cf.addcolumn(xl,"xl") - self.cf.addcolumn(yl,"yl") - self.cf.addcolumn(zl,"zl") - self.peaks_xyzT = np.array((self.cf.xl,self.cf.yl,self.cf.zl)).T.copy() + xl, yl, zl = transform.compute_xyz_lab(pks, **self.transformpars.parameters) + self.cf.addcolumn(xl, "xl") + self.cf.addcolumn(yl, "yl") + self.cf.addcolumn(zl, "zl") + self.peaks_xyzT = np.array((self.cf.xl, self.cf.yl, self.cf.zl)).T.copy() om = self.cf.omega osign = self.transformpars.get("omegasign") tth, eta = transform.compute_tth_eta_from_xyz( - self.peaks_xyzT.T, om*osign, - **self.transformpars.parameters) - self.cf.addcolumn( tth, "tth", ) - self.cf.addcolumn( eta, "eta", ) - self.cf.addcolumn( refinegrains.lf(tth,eta), 'lf') + self.peaks_xyzT.T, om * osign, **self.transformpars.parameters + ) + self.cf.addcolumn( + tth, + "tth", + ) + self.cf.addcolumn( + eta, + "eta", + ) + self.cf.addcolumn(refinegrains.lf(tth, eta), "lf") gx, gy, gz = transform.compute_g_vectors( - tth, eta, om*osign, - wvln = self.transformpars.get("wavelength"), - wedge = self.transformpars.get("wedge"), - chi = self.transformpars.get("chi") ) + tth, + eta, + om * osign, + wvln=self.transformpars.get("wavelength"), + wedge=self.transformpars.get("wedge"), + chi=self.transformpars.get("chi"), + ) self.cf.addcolumn(gx, "gx") self.cf.addcolumn(gy, "gy") - self.cf.addcolumn(gz, "gz") - self.cf.addcolumn( np.sqrt( gx * gx + - gy * gy + - gz * gz ), - "modg") - + self.cf.addcolumn(gz, "gz") + self.cf.addcolumn(np.sqrt(gx * gx + gy * gy + gz * gz), "modg") + def reset(self): - """ when pars change or colfile changes etc """ + """when pars change or colfile changes etc""" if "ring" not in self.cf.titles: - self.cf.addcolumn( -np.ones(self.cf.nrows), "ring" ) - self.cf.addcolumn( 42*np.ones(self.cf.nrows), "ringerr" ) + self.cf.addcolumn(-np.ones(self.cf.nrows), "ring") + self.cf.addcolumn(42 * np.ones(self.cf.nrows), "ringerr") if "labels" not in self.cf.titles: - self.cf.addcolumn( -np.ones(self.cf.nrows), "labels" ) - self.mok = np.zeros( self.cf.nrows, bool ) - + self.cf.addcolumn(-np.ones(self.cf.nrows), "labels") + self.mok = np.zeros(self.cf.nrows, bool) - def tthcalc(self, hkls = None): + def tthcalc(self, hkls=None): """ - Computes the twotheta for a unit cell given a list of + Computes the twotheta for a unit cell given a list of hkls or from the unit cell generating a list of hkls. """ if hkls is None: dslimit = self.cf.modg.max() - wvln = self.transformpars.get( "wavelength" ) - ttol = self.transformpars.get( "fit_tolerance" ) # tth units + wvln = self.transformpars.get("wavelength") + ttol = self.transformpars.get("fit_tolerance") # tth units # at limit this ttol make which ds range? 
- tthlim = 2 * np.degrees( np.arcsin( dslimit * wvln / 2 ) ) - dstol = 2 * np.sin( np.radians( (tthlim + ttol) / 2 ) - ) / wvln - dslimit - self.unitcell.makerings(dslimit+dstol/2, tol = dstol) + tthlim = 2 * np.degrees(np.arcsin(dslimit * wvln / 2)) + dstol = 2 * np.sin(np.radians((tthlim + ttol) / 2)) / wvln - dslimit + self.unitcell.makerings(dslimit + dstol / 2, tol=dstol) else: - self.unitcell.peaks = [ (self.unitcell.ds(h),h) for h in hkls ] - self.unitcell.makerings( dslimit, tol=dstol ) # , newpeaks=False ) - self.unitcell.ringtth = [ 2 * np.degrees( np.arcsin( ds * wvln / 2 ) ) - for ds - in self.unitcell.ringds ] + self.unitcell.peaks = [(self.unitcell.ds(h), h) for h in hkls] + self.unitcell.makerings(dslimit, tol=dstol) # , newpeaks=False ) + self.unitcell.ringtth = [ + 2 * np.degrees(np.arcsin(ds * wvln / 2)) for ds in self.unitcell.ringds + ] def assigntorings(self): """ Assign the g-vectors to hkl rings that are in self.unitcell """ - self.cf.ring[:]=-1 - self.cf.ringerr[:]=42. - tol = self.transformpars.get( "fit_tolerance" ) - for i,tth in enumerate(self.unitcell.ringtth): - diff = self.cf.tth - tth - adiff = np.abs(diff) - mask = (adiff < tol) & (adiff < self.cf.ringerr) - if mask.sum()>0: - self.cf.ring[mask] = i - self.cf.ringerr[mask] = diff[mask] - - + self.cf.ring[:] = -1 + self.cf.ringerr[:] = 42.0 + tol = self.transformpars.get("fit_tolerance") + for i, tth in enumerate(self.unitcell.ringtth): + diff = self.cf.tth - tth + adiff = np.abs(diff) + mask = (adiff < tol) & (adiff < self.cf.ringerr) + if mask.sum() > 0: + self.cf.ring[mask] = i + self.cf.ringerr[mask] = diff[mask] + def printringassign(self): # Report on assignments print("Ring ( h, k, l) Mult total indexed to_index ") @@ -151,253 +158,274 @@ def printringassign(self): dsr = self.unitcell.ringds for j in range(len(dsr))[::-1]: ind = self.cf.ring == j - n_indexed = (self.cf.labels[ind] > -0.5).sum() - n_to_index = (self.cf.labels[ind] < -0.5).sum() - h=self.unitcell.ringhkls[dsr[j]][0] - print("Ring %-3d (%3d,%3d,%3d) %3d %5d %5d %5d %.4f"%(\ - j,h[0],h[1],h[2],len(self.unitcell.ringhkls[dsr[j]]), - ind.sum(),n_indexed,n_to_index, - self.unitcell.ringtth[j] )) - print("Total peaks",self.cf.nrows,"assigned",(self.cf.ring>=0).sum()) - - - def rings_2_use(self, rings = None, multimin = 12 ): + n_indexed = (self.cf.labels[ind] > -0.5).sum() + n_to_index = (self.cf.labels[ind] < -0.5).sum() + h = self.unitcell.ringhkls[dsr[j]][0] + print( + "Ring %-3d (%3d,%3d,%3d) %3d %5d %5d %5d %.4f" + % ( + j, + h[0], + h[1], + h[2], + len(self.unitcell.ringhkls[dsr[j]]), + ind.sum(), + n_indexed, + n_to_index, + self.unitcell.ringtth[j], + ) + ) + print("Total peaks", self.cf.nrows, "assigned", (self.cf.ring >= 0).sum()) + + def rings_2_use(self, rings=None, multimin=12): """Filter rings as having low multiplicity for indexing searches - + Give rings = [list of rings to use] Or multimin = all rings with low multiplicity """ ring = self.cf.ring.astype(int) if rings is None: - mok = np.zeros( (self.cf.nrows,), bool ) - for i, ds in enumerate( self.unitcell.ringds ): + mok = np.zeros((self.cf.nrows,), bool) + for i, ds in enumerate(self.unitcell.ringds): mult = len(self.unitcell.ringhkls[ds]) if mult <= multimin: - mok[ ring == i ] = True + mok[ring == i] = True else: - mok = np.zeros( (self.cf.nrows,), bool ) + mok = np.zeros((self.cf.nrows,), bool) for r in rings: - mok[ ring == r ] = True - self.mok = mok - - def search1d(self, gvec_id, - hkl = None, - angstart = -180, - angend = 180, - nang = 3600, - tol = 0.1, - ): + 
mok[ring == r] = True + self.mok = mok + + def search1d( + self, + gvec_id, + hkl=None, + angstart=-180, + angend=180, + nang=3600, + tol=0.1, + ): """ - gvec_id = an integer for a row of self.colfile - hkl = hkl indices to assign to the peak (None + gvec_id = an integer for a row of self.colfile + hkl = hkl indices to assign to the peak (None means guess from self.cf.ring) This is inspired from Bernier's fibre texture method (citation: https://github.com/HEXRD/hexrd/blob/master/hexrd/findorientations.py - ) + ) """ - gv = np.array( (self.cf.gx[gvec_id], - self.cf.gy[gvec_id], - self.cf.gz[gvec_id]), float) + gv = np.array( + (self.cf.gx[gvec_id], self.cf.gy[gvec_id], self.cf.gz[gvec_id]), float + ) print(gvec_id) if hkl is None: ring = self.cf.ring[gvec_id] ds = self.unitcell.ringds[int(ring)] - hkls = self.unitcell.ringhkls[ ds ] - hkl = np.array( hkls[0], int ) - print("Choosing",hkl,"from hkls", hkls) + hkls = self.unitcell.ringhkls[ds] + hkl = np.array(hkls[0], int) + print("Choosing", hkl, "from hkls", hkls) assert hkl.shape == (3,) - g0 = np.dot( self.unitcell.B, hkl ) # non rotated g + g0 = np.dot(self.unitcell.B, hkl) # non rotated g # normalised vectors - n0 = g0/np.linalg.norm( g0 ) - nobs = gv/np.linalg.norm( gv ) - cosa = np.dot( n0, nobs ) - ang = np.arccos(cosa) + n0 = g0 / np.linalg.norm(g0) + nobs = gv / np.linalg.norm(gv) + cosa = np.dot(n0, nobs) + ang = np.arccos(cosa) # if the vectors are already parallel ? if ang < np.radians(0.001): u0 = np.eye(3) else: - vec = np.cross( n0, nobs ) - sina = np.linalg.norm( vec ) - u0 = Rotation.from_rotvec( ang * vec / sina ).as_matrix() - ub0 = np.dot( u0, self.unitcell.B ) - ubi0 = np.linalg.inv( ub0 ) + vec = np.cross(n0, nobs) + sina = np.linalg.norm(vec) + u0 = Rotation.from_rotvec(ang * vec / sina).as_matrix() + ub0 = np.dot(u0, self.unitcell.B) + ubi0 = np.linalg.inv(ub0) # Now we want to rotate around nobs - allgve = np.array( (self.cf.gx,self.cf.gy,self.cf.gz) ).T.copy() + allgve = np.array((self.cf.gx, self.cf.gy, self.cf.gz)).T.copy() scores = [] ubis = [] - gc = np.dot( ub0, hkl ) - angs = np.radians( np.linspace( angstart, angend, nang) ) - ubis = [ np.dot(ubi0, - Rotation.from_rotvec( nobs * a ).as_matrix()) - for a in angs ] - scores = [ cImageD11.score( ubi, allgve, tol ) - for ubi in ubis ] + # gc = np.dot(ub0, hkl) # gcalc + angs = np.radians(np.linspace(angstart, angend, nang)) + ubis = [np.dot(ubi0, Rotation.from_rotvec(nobs * a).as_matrix()) for a in angs] + scores = [cImageD11.score(ubi, allgve, tol) for ubi in ubis] return scores, ubis - - def choose( self, gnum, tol): + + def choose(self, gnum, tol): isig = self.cf.npixels * self.cf.avg_intensity * self.cf.lf - idp = np.argmax( self.mok * (self.cf.labels<0) * isig ) + idp = np.argmax(self.mok * (self.cf.labels < 0) * isig) angstart = 0 angend = 360 nang = 3600 - s,ubis=self.search1d(idp, tol=tol, - angstart = angstart, angend=angend, nang=nang) + s, ubis = self.search1d( + idp, tol=tol, angstart=angstart, angend=angend, nang=nang + ) matfit = ubis[np.argmax(s)].copy() tfit = np.zeros(3) tol = 0.05 - inds, hkls = self.assign( matfit, tfit, tol ) - matfit, tfit = self.refine( matfit, tfit, inds, hkls, tol ) - inds, hkls = self.assign( matfit, tfit, tol ) - print( inds.shape, tfit ) - print( indexing.ubitocellpars( matfit ) ) - self.cf.labels[ inds ] = gnum - self.grains[ gnum ] = grain.grain( matfit, tfit ) - - - def pairs(self, hkl1, hkl2, cos_tol = 0.02, hkl_tol = 0.1): + inds, hkls = self.assign(matfit, tfit, tol) + matfit, tfit = self.refine(matfit, tfit, 
inds, hkls, tol) + inds, hkls = self.assign(matfit, tfit, tol) + print(inds.shape, tfit) + print(indexing.ubitocellpars(matfit)) + self.cf.labels[inds] = gnum + self.grains[gnum] = grain.grain(matfit, tfit) + + def pairs(self, hkl1, hkl2, cos_tol=0.02, hkl_tol=0.1): """ We only look for reflection pairs matching a single hkl pairing """ import time + start = time.time() - w = self.transformpars.get( "wavelength" ) - tth1 = get_tth(self.unitcell.ds(hkl1), w) + w = self.transformpars.get("wavelength") + tth1 = get_tth(self.unitcell.ds(hkl1), w) tth2 = get_tth(self.unitcell.ds(hkl2), w) - tthtol = self.transformpars.get( "fit_tolerance" ) - allinds = np.arange( self.cf.nrows ) - ind1 = allinds[ abs(self.cf.tth - tth1) < tthtol ] - ind2 = allinds[ abs(self.cf.tth - tth2) < tthtol ] - angle, cosangle = self.unitcell.anglehkls( hkl1, hkl2 ) - print("Angle, cosangle",angle,cosangle,hkl1,hkl2) + tthtol = self.transformpars.get("fit_tolerance") + allinds = np.arange(self.cf.nrows) + ind1 = allinds[abs(self.cf.tth - tth1) < tthtol] + ind2 = allinds[abs(self.cf.tth - tth2) < tthtol] + angle, cosangle = self.unitcell.anglehkls(hkl1, hkl2) + print("Angle, cosangle", angle, cosangle, hkl1, hkl2) assert angle > 1 and angle < 179, "hkls are parallel" - g = np.array( (self.cf.gx, self.cf.gy, self.cf.gz), float ) - n = g/self.cf.modg + g = np.array((self.cf.gx, self.cf.gy, self.cf.gz), float) + n = g / self.cf.modg gvf = g.T.copy() - n1 = n[:,ind1] - n2 = n[:,ind2] + n1 = n[:, ind1] + n2 = n[:, ind2] pairs = [] - j = np.arange( n2.shape[1] ) + j = np.arange(n2.shape[1]) # from unitcell.orient - h1c=unit(np.dot( self.unitcell.B, hkl1 )) - h2c=unit(np.dot( self.unitcell.B, hkl2 )) - t1c=unit(h1c) - t3c=unit(np.cross(h1c,h2c)) - t2c=unit(np.cross(h1c,t3c)) - T_c = np.array( [t1c, t2c, t3c] ) - T_g = np.zeros((3,3)) - for i,this_n1 in enumerate(n1.T): - cosa = np.dot( this_n1, n2 ) - goodones = j[abs(cosa - cosangle) < cos_tol] - # t1 is along g1 - # t2 is plane of both: g1x(g1xg2) - # t3 is perpendicular to both - #if i%100==0: - # print i,time.time()-start,len(n1.T) - for k in goodones: - this_n2 = n2[:,k] - T_g[0] = this_n1 - T_g[2] = unit( np.cross( this_n1, this_n2 ) ) - T_g[1] = unit( np.cross( this_n1, T_g[2]) ) - U = np.dot( T_g.T, T_c) - ub = np.dot( U, self.unitcell.B) - ubi = np.linalg.inv( ub ) - ubio = ubi.copy() - npks = cImageD11.score(ubi,gvf,hkl_tol) - pairs.append( (ind1[i], ind2[k], U, ubi ) ) - print(npks, end=' ') - - ubi, trans = self.refine( ubi, np.zeros(3,float), tol=hkl_tol ) - inds, hkls = self.assign( ubi, trans, hkl_tol ) - ubi, trans = self.refine( ubi, trans, inds = inds, hkls= hkls, tol=hkl_tol ) - print(npks, ubi) - print("cell: ",6*"%.6f "%( indexing.ubitocellpars(ubi) )) - print("position: ",trans) - print() - self.pairscache=pairs - print(time.time()-start,"for",len(pairs),n1.shape, n2.shape) - return pairs + h1c = unit(np.dot(self.unitcell.B, hkl1)) + h2c = unit(np.dot(self.unitcell.B, hkl2)) + t1c = unit(h1c) + t3c = unit(np.cross(h1c, h2c)) + t2c = unit(np.cross(h1c, t3c)) + T_c = np.array([t1c, t2c, t3c]) + T_g = np.zeros((3, 3)) + for i, this_n1 in enumerate(n1.T): + cosa = np.dot(this_n1, n2) + goodones = j[abs(cosa - cosangle) < cos_tol] + # t1 is along g1 + # t2 is plane of both: g1x(g1xg2) + # t3 is perpendicular to both + # if i%100==0: + # print i,time.time()-start,len(n1.T) + for k in goodones: + this_n2 = n2[:, k] + T_g[0] = this_n1 + T_g[2] = unit(np.cross(this_n1, this_n2)) + T_g[1] = unit(np.cross(this_n1, T_g[2])) + U = np.dot(T_g.T, T_c) + ub = np.dot(U, 
self.unitcell.B) + ubi = np.linalg.inv(ub) + # ubio = ubi.copy() + npks = cImageD11.score(ubi, gvf, hkl_tol) + pairs.append((ind1[i], ind2[k], U, ubi)) + print(npks, end=" ") + ubi, trans = self.refine(ubi, np.zeros(3, float), tol=hkl_tol) + inds, hkls = self.assign(ubi, trans, hkl_tol) + ubi, trans = self.refine(ubi, trans, inds=inds, hkls=hkls, tol=hkl_tol) + print(npks, ubi) + print("cell: ", 6 * "%.6f " % (indexing.ubitocellpars(ubi))) + print("position: ", trans) + print() + self.pairscache = pairs + print(time.time() - start, "for", len(pairs), n1.shape, n2.shape) + return pairs def assign(self, ubi, translation, tol): - gv = np.zeros(self.peaks_xyzT.shape, float ) - cImageD11.compute_gv( self.peaks_xyzT, - self.cf.omega, - self.transformpars.get('omegasign'), - self.transformpars.get('wavelength'), - self.transformpars.get('wedge'), - self.transformpars.get('chi'), - translation, - gv) - hkl = np.dot( ubi, gv.T ) - hkli = np.floor( hkl + 0.5 ) - e = (hkl - hkli) - e = (e*e).sum(axis=0) - inds = np.compress( e < tol*tol, np.arange(len(hkl[0])) ) - return inds , hkli[:,inds] - + gv = np.zeros(self.peaks_xyzT.shape, float) + cImageD11.compute_gv( + self.peaks_xyzT, + self.cf.omega, + self.transformpars.get("omegasign"), + self.transformpars.get("wavelength"), + self.transformpars.get("wedge"), + self.transformpars.get("chi"), + translation, + gv, + ) + hkl = np.dot(ubi, gv.T) + hkli = np.floor(hkl + 0.5) + e = hkl - hkli + e = (e * e).sum(axis=0) + inds = np.compress(e < tol * tol, np.arange(len(hkl[0]))) + return inds, hkli[:, inds] NC = 0 - def gof( self, p, *args ): + + def gof(self, p, *args): self.NC += 1 try: hkls, inds, peaks_xyzT, gobs, omega = args except: print(args, len(args)) raise - p.shape = 4,3 + p.shape = 4, 3 ub = p[:3] t = p[3] - gcalc = np.dot( ub, hkls ) - cImageD11.compute_gv( peaks_xyzT, - omega, - self.transformpars.get('omegasign'), - self.transformpars.get('wavelength'), - self.transformpars.get('wedge'), - self.transformpars.get('chi'), - t, - gobs) - #print gobs - #print gcalc - #print (gobs-gcalc).ravel() - #1/0 + gcalc = np.dot(ub, hkls) + cImageD11.compute_gv( + peaks_xyzT, + omega, + self.transformpars.get("omegasign"), + self.transformpars.get("wavelength"), + self.transformpars.get("wedge"), + self.transformpars.get("chi"), + t, + gobs, + ) + # print gobs + # print gcalc + # print (gobs-gcalc).ravel() + # 1/0 e = (gcalc - gobs.T).ravel() - p.shape = 12, - # print p-0.1,(e*e).sum() - return e#(e*e).sum() - - def refine(self, ubi, translation=(0,0,0) , - inds = None, hkls = None, - tol = 0.05): + p.shape = (12,) + # print p-0.1,(e*e).sum() + return e # (e*e).sum() + + def refine(self, ubi, translation=(0, 0, 0), inds=None, hkls=None, tol=0.05): """ Fit ubi and translation ihkls = array of peak_index, h, k, l tol = hkl tolerance to use in fitting """ - import time - self.NC =0 - start = time.time() + + self.NC = 0 + # start = time.time() if inds is None: - inds , hkls = self.assign( ubi, translation, tol ) - ub = np.linalg.inv( ubi ) - x0 = np.array( list( ub.ravel() ) + list(translation ) ) + inds, hkls = self.assign(ubi, translation, tol) + ub = np.linalg.inv(ubi) + x0 = np.array(list(ub.ravel()) + list(translation)) fun = self.gof - args = (hkls, inds, self.peaks_xyzT[inds], - np.zeros(self.peaks_xyzT[inds].shape ), - self.cf.omega[inds]) - def Dfun( x0, *args ): - epsilon = np.ones(12)*1e-6 - epsilon[-3:] = 1. 
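# --- Illustrative aside (not part of the patch): the Dfun/deriv helpers here
# build a forward-difference Jacobian, grad[k] = (f(x + e_k) - f(x)) / eps_k,
# one row per parameter.  A tiny check against a function with a known
# Jacobian (fd_jacobian is a hypothetical stand-in for deriv):
import numpy as np

def fd_jacobian(f, x, eps):
    f0 = f(x)
    grad = np.zeros((len(x), len(f0)))
    for k in range(len(x)):
        dx = np.zeros_like(x)
        dx[k] = eps[k]
        grad[k] = (f(x + dx) - f0) / eps[k]
    return grad

f = lambda x: np.array([x[0] * x[1], x[0] + 3.0 * x[1]])
J = fd_jacobian(f, np.array([2.0, 5.0]), np.array([1e-7, 1e-7]))
# exact rows: d f/d x0 = [x1, 1] = [5, 1] and d f/d x1 = [x0, 3] = [2, 3]
assert np.allclose(J, [[5.0, 1.0], [2.0, 3.0]], atol=1e-5)
# ---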
- return deriv( x0, fun, epsilon, *args) - res, ier = scipy.optimize.leastsq( fun, x0, args, )# - # Dfun, col_deriv=True) - ub = np.reshape(res[:9], (3,3)) + args = ( + hkls, + inds, + self.peaks_xyzT[inds], + np.zeros(self.peaks_xyzT[inds].shape), + self.cf.omega[inds], + ) + + def Dfun(x0, *args): + epsilon = np.ones(12) * 1e-6 + epsilon[-3:] = 1.0 + return deriv(x0, fun, epsilon, *args) + + res, ier = scipy.optimize.leastsq( + fun, + x0, + args, + ) # + # Dfun, col_deriv=True) + ub = np.reshape(res[:9], (3, 3)) t = res[-3:] - ubi = np.linalg.inv( ub ) + ubi = np.linalg.inv(ub) return ubi, t - -def deriv( xk, f, epsilon, *args): + + +def deriv(xk, f, epsilon, *args): f0 = f(*((xk,) + args)) - grad = np.zeros((len(xk),len(f0)), float) + grad = np.zeros((len(xk), len(f0)), float) ei = np.zeros((len(xk),), float) for k in range(len(xk)): ei[k] = 1.0 @@ -406,43 +434,43 @@ def deriv( xk, f, epsilon, *args): ei[k] = 0.0 return grad -if __name__=="__main__": + +if __name__ == "__main__": from ImageD11.columnfile import columnfile from ImageD11.parameters import read_par_file import ImageD11.grain import sys + p = read_par_file(sys.argv[1]) - c = columnfile( sys.argv[2] ) - i = indexer( p, c ) + c = columnfile(sys.argv[2]) + i = indexer(p, c) if sys.argv[3][:3] == "fit": # 0 1 2 3 4 5 # \test\simul_1000_grains>python ..\..\ImageD11\indexer.py Al1000\Al1000.par Al1000\Al1000.flt fit allgrid.map allgridfitscipy.map - gl = ImageD11.grain.read_grain_file( sys.argv[4] ) - inds = np.arange( len(gl), dtype=int ) - allhkls = np.array( (c.h, c.k, c.l) ) - for k,g in enumerate(gl): + gl = ImageD11.grain.read_grain_file(sys.argv[4]) + inds = np.arange(len(gl), dtype=int) + allhkls = np.array((c.h, c.k, c.l)) + for k, g in enumerate(gl): if len(sys.argv[3]) == len("fit"): - for j, tol in enumerate([0.05,0.02,0.01,0.0075]): - inds, hkls = i.assign( g.ubi, g.translation, tol ) - ubi, t = i.refine( g.ubi, - translation=g.translation, - inds = inds, hkls = hkls, - tol = tol) + for j, tol in enumerate([0.05, 0.02, 0.01, 0.0075]): + inds, hkls = i.assign(g.ubi, g.translation, tol) + ubi, t = i.refine( + g.ubi, translation=g.translation, inds=inds, hkls=hkls, tol=tol + ) g.translation = t - g.set_ubi( ubi ) + g.set_ubi(ubi) else: - inds = np.compress( c.labels == k , inds ) - hkls = np.compress( c.labels == k , allhkls ) + inds = np.compress(c.labels == k, inds) + hkls = np.compress(c.labels == k, allhkls) for j in range(3): - ubi, t = i.refine( g.ubi, - translation=g.translation, - inds = inds, hkls = hkls, - tol = tol) + ubi, t = i.refine( + g.ubi, translation=g.translation, inds=inds, hkls=hkls, tol=tol + ) g.translation = t - g.set_ubi( ubi ) - print(k, len(inds),6*"%.8f "%(indexing.ubitocellpars(ubi))) - print("\t",t) - ImageD11.grain.write_grain_file( sys.argv[5], gl ) + g.set_ubi(ubi) + print(k, len(inds), 6 * "%.8f " % (indexing.ubitocellpars(ubi))) + print("\t", t) + ImageD11.grain.write_grain_file(sys.argv[5], gl) else: i.updatecolfile() i.tthcalc() @@ -454,4 +482,4 @@ def deriv( xk, f, epsilon, *args): i.pairs(hkl1, hkl2) - # e.g. python indexer.py ../test/demo/eu3.pars ../test/demo/eu.flt 2,0,0 0,0,4 +# e.g. python indexer.py ../test/demo/eu3.pars ../test/demo/eu.flt 2,0,0 0,0,4 diff --git a/ImageD11/indexing.py b/ImageD11/indexing.py index 6dea01a1..fb44c559 100644 --- a/ImageD11/indexing.py +++ b/ImageD11/indexing.py @@ -1,4 +1,3 @@ - from __future__ import print_function, division @@ -22,132 +21,151 @@ import numpy as np from . 
import cImageD11, unitcell -from xfab.tools import ubi_to_u, u_to_rod, ubi_to_rod +from xfab.tools import ubi_to_u, ubi_to_rod -import math, time, sys +import math, time loglevel = 1 -class clogging(object): # because multiprocessing. FIXME. object level logging rather than module ? + +class clogging( + object +): # because multiprocessing. FIXME. object level logging rather than module ? def log(self, *args): print(" ".join(str(a) for a in args)) + def debug(self, *args): - if loglevel <= 0: logging.log('debug:',*args) + if loglevel <= 0: + logging.log("debug:", *args) + def info(self, *args): - if loglevel <= 1: logging.log('info:',*args) + if loglevel <= 1: + logging.log("info:", *args) + def warning(self, *args): - if loglevel <= 2: logging.log('warning:',*args) + if loglevel <= 2: + logging.log("warning:", *args) + def error(self, *args): - if loglevel <= 3: logging.log('error:',*args) + if loglevel <= 3: + logging.log("error:", *args) + logging = clogging() -def ubi_fit_2pks( ubi, g1, g2): + +def ubi_fit_2pks(ubi, g1, g2): """ Refine a ubi matrix so it matches the pair of g-vectors supplied (almost) accounts for cell parameters not being quite right + + FIXME : This does not work. """ - ub = np.linalg.inv( ubi ) - h1 = np.round( np.dot( ubi, g1 ) ) - h2 = np.round( np.dot( ubi, g2 ) ) - g3 = np.cross( g1, g2 ) - h3 = np.dot( ubi, g3 ) # do not round to integer - g1c = np.dot( ub, h1 ) - g2c = np.dot( ub, h2 ) - g3c = np.dot( ub, h3 ) - R = np.outer( g1, h1 ) + np.outer( g2, h2 ) + np.outer( g3, h3 ) - H = np.outer( h1, h1 ) + np.outer( h2, h2 ) + np.outer( h3, h3 ) - ubfit = np.dot( R, np.linalg.inv( H ) ) - ubifit = np.linalg.inv( ubfit ) + # ub = np.linalg.inv(ubi) + h1 = np.round(np.dot(ubi, g1)) + h2 = np.round(np.dot(ubi, g2)) + g3 = np.cross(g1, g2) + h3 = np.dot(ubi, g3) # do not round to integer + # g1c = np.dot(ub, h1) + # g2c = np.dot(ub, h2) + # g3c = np.dot(ub, h3) + R = np.outer(g1, h1) + np.outer(g2, h2) + np.outer(g3, h3) + H = np.outer(h1, h1) + np.outer(h2, h2) + np.outer(h3, h3) + ubfit = np.dot(R, np.linalg.inv(H)) + ubifit = np.linalg.inv(ubfit) return ubifit - - def myhistogram(data, bins): - """ - The numpy histogram api was changed - So here is an api that will not change - It is based on that from the old Numeric manual - """ - n = np.searchsorted( np.sort(data), bins ) - n = np.concatenate( [ n, [len(data)]] ) - return n[1:] - n[:-1] + """ + The numpy histogram api was changed + So here is an api that will not change + It is based on that from the old Numeric manual + """ + n = np.searchsorted(np.sort(data), bins) + n = np.concatenate([n, [len(data)]]) + return n[1:] - n[:-1] def readubis(ubifile): - """read ubifile and return a list of ubi arrays """ + """read ubifile and return a list of ubi arrays""" f = open(ubifile, "r") ubisread = [] u = [] for line in f: - if line[0]=="#": + if line[0] == "#": continue - vals = [ float(x) for x in line.split() ] + vals = [float(x) for x in line.split()] if len(vals) == 3: u = u + [vals] - if len(u)==3: + if len(u) == 3: ubisread.append(np.array(u)) u = [] f.close() return ubisread + def write_ubi_file(filename, ubilist): - """ save 3x3 matrices into file """ - f=open(filename,"w") + """save 3x3 matrices into file""" + f = open(filename, "w") for u in ubilist: - f.write("%f %f %f\n" %(u[0][0],u[0][1],u[0][2])) - f.write("%f %f %f\n" %(u[1][0],u[1][1],u[1][2])) - f.write("%f %f %f\n\n"%(u[2][0],u[2][1],u[2][2])) + f.write("%f %f %f\n" % (u[0][0], u[0][1], u[0][2])) + f.write("%f %f %f\n" % (u[1][0], u[1][1], u[1][2])) + 
f.write("%f %f %f\n\n" % (u[2][0], u[2][1], u[2][2])) f.close() + def ubitocellpars(ubi): - """ convert ubi matrix to unit cell """ + """convert ubi matrix to unit cell""" g = np.dot(ubi, np.transpose(ubi)) from math import acos, degrees, sqrt - a = sqrt(g[0,0]) - b = sqrt(g[1,1]) - c = sqrt(g[2,2]) - alpha = degrees( acos(g[1,2]/b/c)) - beta = degrees( acos(g[0,2]/a/c)) - gamma = degrees( acos(g[0,1]/a/b)) + + a = sqrt(g[0, 0]) + b = sqrt(g[1, 1]) + c = sqrt(g[2, 2]) + alpha = degrees(acos(g[1, 2] / b / c)) + beta = degrees(acos(g[0, 2] / a / c)) + gamma = degrees(acos(g[0, 1] / a / b)) return a, b, c, alpha, beta, gamma + def ubitoU(ubi): """ convert ubi to Grainspotter style U The convention is B as being triangular, hopefully as Busing and Levy TODO - make some testcases please!! """ - #return np.transpose(np.dot(ubitoB(ubi),ubi)) + # return np.transpose(np.dot(ubitoB(ubi),ubi)) return ubi_to_u(ubi) + def ubitoRod(ubi): """ TODO Testcases!!! """ -# u = ubitoU(ubi) -# w, v = np.linalg.eig(u) -# print 'Eigenvalues' -# print w -# print 'Eigen vectors' -# print v -# #ehat = v[:,0] -# #angle = -1*math.acos(np.clip(w[order[1]].real,-1,1)) -# order = np.argsort(w.real) -# print order -# ehat = v[:, order[-1]] -# if order.tolist() != range(3): -# print 'HHFH' -# angle = -1*np.arccos(w[order[1]].real) -# else: -# angle = np.arccos(w[order[1]].real) -# Rod = ehat * math.tan(angle/2) -# return Rod.real + # u = ubitoU(ubi) + # w, v = np.linalg.eig(u) + # print 'Eigenvalues' + # print w + # print 'Eigen vectors' + # print v + # #ehat = v[:,0] + # #angle = -1*math.acos(np.clip(w[order[1]].real,-1,1)) + # order = np.argsort(w.real) + # print order + # ehat = v[:, order[-1]] + # if order.tolist() != range(3): + # print 'HHFH' + # angle = -1*np.arccos(w[order[1]].real) + # else: + # angle = np.arccos(w[order[1]].real) + # Rod = ehat * math.tan(angle/2) + # return Rod.real return ubi_to_rod(ubi) + def ubitoB(ubi): - """ give the B matrix from ubi """ + """give the B matrix from ubi""" g = np.dot(ubi, np.transpose(ubi)) return np.transpose(np.linalg.inv(np.linalg.cholesky(g))) @@ -165,6 +183,7 @@ def mod_360(theta, target): diff = theta - target return theta + def calc_drlv2(UBI, gv): """ Get the difference from being integer hkls @@ -173,13 +192,12 @@ def calc_drlv2(UBI, gv): returns drlv2 = (h_calc - h_int)^2 """ h = np.dot(UBI, np.transpose(gv)) - hint = np.floor(h + 0.5).astype(int) # rounds down + hint = np.floor(h + 0.5).astype(int) # rounds down diff = h - hint - drlv2 = np.sum(diff * diff,0) + drlv2 = np.sum(diff * diff, 0) return drlv2 - def refine(UBI, gv, tol, quiet=True): """ Refine an orientation matrix and rescore it. 
@@ -197,53 +215,52 @@ def refine(UBI, gv, tol, quiet=True): # print "Scores before",self.score(UBI) # Need to find hkl indices for all of the peaks which are indexed h = np.dot(UBI, np.transpose(gv)) - hint = np.floor(h+0.5).astype(int) # rounds down + hint = np.floor(h + 0.5).astype(int) # rounds down diff = h - hint - drlv2 = np.sum( diff * diff, 0) + drlv2 = np.sum(diff * diff, 0) tol = float(tol) tol = tol * tol # Only use peaks which are assigned to rings for refinement - ind = np.compress( np.less(drlv2,tol) , np.arange(gv.shape[0]) ) + ind = np.compress(np.less(drlv2, tol), np.arange(gv.shape[0])) # scoreb4=ind.shape[0] contribs = drlv2[ind] try: - fitb4=math.sqrt(np.sum(contribs)/contribs.shape[0]) + fitb4 = math.sqrt(np.sum(contribs) / contribs.shape[0]) if not quiet: - logging.debug("Fit before refinement %.8f %5d"% ( - fitb4, contribs.shape[0])) + logging.debug("Fit before refinement %.8f %5d" % (fitb4, contribs.shape[0])) except: - logging.error("No contributing reflections for \n%s"%(str(UBI))) + logging.error("No contributing reflections for \n%s" % (str(UBI))) raise # drlv2_old=drlv2 - R=np.zeros((3,3),float) - H=np.zeros((3,3),float) + R = np.zeros((3, 3), float) + H = np.zeros((3, 3), float) for i in ind: - r = gv[i,:] - k = hint[:,i].astype(float) + r = gv[i, :] + k = hint[:, i].astype(float) # print r,k - R = R + np.outer(r,k) - H = H + np.outer(k,k) + R = R + np.outer(r, k) + H = H + np.outer(k, k) try: - HI=np.linalg.inv(H) - UBoptimal=np.dot(R,HI) - UBIo=np.linalg.inv(UBoptimal) + HI = np.linalg.inv(H) + UBoptimal = np.dot(R, HI) + UBIo = np.linalg.inv(UBoptimal) except: # A singular matrix - this sucks. - UBIo=UBI - h=np.dot(UBIo,np.transpose(gv)) - hint=np.floor(h+0.5).astype(int) # rounds down - diff=h-hint - drlv2=np.sum(diff*diff,0) - ind = np.compress( np.less(drlv2,tol), np.arange(gv.shape[0]) ) + UBIo = UBI + h = np.dot(UBIo, np.transpose(gv)) + hint = np.floor(h + 0.5).astype(int) # rounds down + diff = h - hint + drlv2 = np.sum(diff * diff, 0) + ind = np.compress(np.less(drlv2, tol), np.arange(gv.shape[0])) # scorelastrefined=ind.shape[0] contribs = drlv2[ind] try: - fitlastrefined=math.sqrt(np.sum(contribs)/contribs.shape[0]) + fitlastrefined = math.sqrt(np.sum(contribs) / contribs.shape[0]) if not quiet: - logging.debug("after %.8f %5d"%(fitlastrefined,contribs.shape[0])) + logging.debug("after %.8f %5d" % (fitlastrefined, contribs.shape[0])) except: logging.error("\n\n\n") - logging.error("No contributing reflections for %s\n"%(str(UBI))) + logging.error("No contributing reflections for %s\n" % (str(UBI))) logging.error("After refinement, it was OK before ???") logging.error("\n\n\n") return UBI @@ -258,31 +275,35 @@ def refine(UBI, gv, tol, quiet=True): # print "Mean drlv old",sum(sqrt(drlv2_old))/drlv2_old.shape[0] return UBIo -def indexer_from_colfile( colfile, **kwds ): - uc = unitcell.unitcell_from_parameters( colfile.parameters ) - w = float( colfile.parameters.get("wavelength") ) - gv = np.array( (colfile.gx,colfile.gy,colfile.gz), float) - kwds.update( {"unitcell": uc, "wavelength":w, "gv":gv.T } ) - return indexer( **kwds ) + +def indexer_from_colfile(colfile, **kwds): + uc = unitcell.unitcell_from_parameters(colfile.parameters) + w = float(colfile.parameters.get("wavelength")) + gv = np.array((colfile.gx, colfile.gy, colfile.gz), float) + kwds.update({"unitcell": uc, "wavelength": w, "gv": gv.T}) + return indexer(**kwds) class indexer: """ A class for searching for orientation matrices """ - def __init__(self, - unitcell=None, - gv=None, - 
cosine_tol = 0.002, - minpks = 10 , - hkl_tol=0.01, - ring_1=1, - ring_2=2, - ds_tol=0.005, - wavelength=-1, - uniqueness=0.5, - eta_range=0., - max_grains=100): + + def __init__( + self, + unitcell=None, + gv=None, + cosine_tol=0.002, + minpks=10, + hkl_tol=0.01, + ring_1=1, + ring_2=2, + ds_tol=0.005, + wavelength=-1, + uniqueness=0.5, + eta_range=0.0, + max_grains=100, + ): """ Unitcell would be a unitcell object for generating hkls peaks gv would be a 3*n array of points in reciprocal space @@ -290,68 +311,77 @@ def __init__(self, """ # This stop variable allows computation to be run in a thread... self.stop = False - self.unitcell=unitcell - self.gv=gv + self.unitcell = unitcell + self.gv = gv self.ra = None - if gv is not None: # do init - logging.info('gv: %s %s %s'%( str(gv), str(gv.shape), str(gv.dtype))) + if gv is not None: # do init + logging.info("gv: %s %s %s" % (str(gv), str(gv.shape), str(gv.dtype))) assert gv.shape[1] == 3 - self.gv = gv.astype( float ) - self.ds = np.sqrt( (gv*gv).sum(axis=1) ) - self.ga = np.zeros(len(self.ds),np.int32)-1 # Grain assignments - self.gvflat=np.ascontiguousarray(gv, float) - self.wedge=0.0 # Default - - self.cosine_tol=cosine_tol - self.wavelength=wavelength - self.hkl_tol=hkl_tol - self.ring_1=ring_1 - self.ring_2=ring_2 - self.uniqueness=uniqueness - self.minpks=minpks - self.ds_tol=ds_tol - self.max_grains=max_grains + self.gv = gv.astype(float) + self.ds = np.sqrt((gv * gv).sum(axis=1)) + self.ga = np.zeros(len(self.ds), np.int32) - 1 # Grain assignments + self.gvflat = np.ascontiguousarray(gv, float) + self.wedge = 0.0 # Default + + self.cosine_tol = cosine_tol + self.wavelength = wavelength + self.hkl_tol = hkl_tol + self.ring_1 = ring_1 + self.ring_2 = ring_2 + self.uniqueness = uniqueness + self.minpks = minpks + self.ds_tol = ds_tol + self.max_grains = max_grains self.eta_range = eta_range - self.ubis=[] - self.scores=[] - self.index_needs_debug = 0 # track problems quietly... - self.omega_fullrange=0 # ?? + self.ubis = [] + self.scores = [] + self.index_needs_debug = 0 # track problems quietly... + self.omega_fullrange = 0 # ?? # it would make more sense to inherit the parameter object - will # have to think about this some more - how general is it? 
from ImageD11 import parameters - self.parameterobj = parameters.parameters(cosine_tol=self.cosine_tol, - hkl_tol=self.hkl_tol, ring_1=self.ring_1, ring_2=self.ring_2, - minpks=self.minpks, uniqueness=self.uniqueness, ds_tol=self.ds_tol, - wavelength=self.wavelength, eta_range=self.eta_range, - max_grains=self.max_grains) + + self.parameterobj = parameters.parameters( + cosine_tol=self.cosine_tol, + hkl_tol=self.hkl_tol, + ring_1=self.ring_1, + ring_2=self.ring_2, + minpks=self.minpks, + uniqueness=self.uniqueness, + ds_tol=self.ds_tol, + wavelength=self.wavelength, + eta_range=self.eta_range, + max_grains=self.max_grains, + ) # Add a resetting functionality, adapted from # stackoverflow.com/questions/4866587/pythonic-way-to-reset-an-objects-variables import copy - self.__pristine_dict = copy.deepcopy( self.__dict__ ) + + self.__pristine_dict = copy.deepcopy(self.__dict__) def __getattr__(self, name): - """ got some time lost setting tol which does not exist + """got some time lost setting tol which does not exist this will never be clean :-( """ - if name == 'tol': - raise KeyError('tol not in indexer') - print("WARNING: creating indexer.%s"%(name)) + if name == "tol": + raise KeyError("tol not in indexer") + print("WARNING: creating indexer.%s" % (name)) setattr(self, name, None) - def reset( self ): + def reset(self): """ To get a really clean indexer just create a new one (e.g. via __init__) This was added for the gui to help it forget what happened before but keep parameters as they were set """ import copy - self.__dict__ = copy.deepcopy( self.__pristine_dict ) - self.__pristine_dict = copy.deepcopy( self.__dict__ ) + self.__dict__ = copy.deepcopy(self.__pristine_dict) + self.__pristine_dict = copy.deepcopy(self.__dict__) - def loadpars(self,filename=None): + def loadpars(self, filename=None): if filename is not None: self.parameterobj.loadparameters(filename) # self.parameterobj.update_other(self) # busted CI for logging in windows + py2.7 @@ -359,22 +389,21 @@ def loadpars(self,filename=None): if hasattr(self, parname): setattr(self, parname, self.parameterobj.get(parname)) - def updateparameters(self): self.savepars() - self.pars=self.parameterobj.parameters + self.pars = self.parameterobj.parameters - def savepars(self,filename=None): + def savepars(self, filename=None): self.parameterobj.update_yourself(self) if filename is not None: self.parameterobj.saveparameters(filename) - def out_of_eta_range(self,eta): - """ decide if an eta is going to be kept """ + def out_of_eta_range(self, eta): + """decide if an eta is going to be kept""" e = mod_360(float(eta), 0) if e < abs(self.eta_range) and e > -abs(self.eta_range): return True - if e < -180.+abs(self.eta_range) or e > 180.-abs(self.eta_range): + if e < -180.0 + abs(self.eta_range) or e > 180.0 - abs(self.eta_range): return True return False @@ -383,95 +412,136 @@ def assigntorings(self): Assign the g-vectors to hkl rings """ # rings are in self.unitcell - limit = np.amax( self.ds ) - logging.info("Assign to rings, maximum d-spacing considered: %f"%(limit)) - self.unitcell.makerings(limit, tol = self.ds_tol) + limit = np.amax(self.ds) + logging.info("Assign to rings, maximum d-spacing considered: %f" % (limit)) + self.unitcell.makerings(limit, tol=self.ds_tol) dsr = self.unitcell.ringds # npks npks = len(self.ds) - self.ra = np.zeros(npks, np.int32)-1 + self.ra = np.zeros(npks, np.int32) - 1 self.na = np.zeros(len(dsr), np.int32) - logging.info("Ring assignment array shape",self.ra.shape) + logging.info("Ring assignment array 
shape", self.ra.shape) tol = float(self.ds_tol) - best = np.zeros(npks, float)+tol + best = np.zeros(npks, float) + tol for j, dscalc in enumerate(dsr): dserr = abs(self.ds - dscalc) sel = dserr < best - self.ra[sel]=j + self.ra[sel] = j best[sel] = dserr[sel] # Report on assignments - ds=np.array(self.ds) - logging.info("Ring ( h, k, l) Mult total indexed to_index ubis peaks_per_ubi tth") + # ds = np.array(self.ds) + logging.info( + "Ring ( h, k, l) Mult total indexed to_index ubis peaks_per_ubi tth" + ) minpks = 0 # try reverse order instead for j in range(len(dsr))[::-1]: - ind = np.compress( np.equal(self.ra,j), np.arange(self.ra.shape[0]) ) - self.na[j]=ind.shape[0] - n_indexed = np.sum(np.where( self.ga[ind] > -1, 1, 0)) - n_to_index = np.sum(np.where( self.ga[ind] == -1, 1, 0)) + ind = np.compress(np.equal(self.ra, j), np.arange(self.ra.shape[0])) + self.na[j] = ind.shape[0] + n_indexed = np.sum(np.where(self.ga[ind] > -1, 1, 0)) + n_to_index = np.sum(np.where(self.ga[ind] == -1, 1, 0)) # diffs = abs(take(ds,ind) - dsr[j]) - h=self.unitcell.ringhkls[dsr[j]][0] + h = self.unitcell.ringhkls[dsr[j]][0] Mult = len(self.unitcell.ringhkls[dsr[j]]) try: - expected_orients = int(180./self.omega_fullrange * self.na[j]/float(Mult)) - expected_npks = int(self.omega_fullrange/180. * Mult) + expected_orients = int( + 180.0 / self.omega_fullrange * self.na[j] / float(Mult) + ) + expected_npks = int(self.omega_fullrange / 180.0 * Mult) minpks += expected_npks except: - expected_orients = 'N/A' - expected_npks = 'N/A' - tth = 2*np.degrees(np.arcsin(dsr[j]*self.wavelength/2)) - logging.info("Ring %-3d (%3d,%3d,%3d) %3d %5d %5d %5d %5s %2s %.2f"%( - j,h[0],h[1],h[2],Mult, - self.na[j],n_indexed,n_to_index,expected_orients,expected_npks,tth)) + expected_orients = "N/A" + expected_npks = "N/A" + tth = 2 * np.degrees(np.arcsin(dsr[j] * self.wavelength / 2)) + logging.info( + "Ring %-3d (%3d,%3d,%3d) %3d %5d %5d %5d %5s %2s %.2f" + % ( + j, + h[0], + h[1], + h[2], + Mult, + self.na[j], + n_indexed, + n_to_index, + expected_orients, + expected_npks, + tth, + ) + ) if minpks > 0: - logging.info('\nmin_pks: - Current --> %3d'%(self.minpks)) - logging.info(' - Expected --> %3d\n'%(minpks)) + logging.info("\nmin_pks: - Current --> %3d" % (self.minpks)) + logging.info(" - Expected --> %3d\n" % (minpks)) # We will only attempt to index g-vectors which have been assigned # to hkl rings (this gives a speedup if there # are a lot of spare peaks - ind = np.compress(np.greater(self.ra,-1), np.arange(self.ra.shape[0])) + ind = np.compress(np.greater(self.ra, -1), np.arange(self.ra.shape[0])) self.gvr = self.gv[ind] - logging.info("Using only those peaks which are assigned to rings for scoring trial matrices") - logging.info("Shape of scoring matrix",self.gvr.shape) - self.gvflat=np.ascontiguousarray(self.gvr, float) # Makes it contiguous + logging.info( + "Using only those peaks which are assigned to rings for scoring trial matrices" + ) + logging.info("Shape of scoring matrix", self.gvr.shape) + self.gvflat = np.ascontiguousarray(self.gvr, float) # Makes it contiguous # in memory, hkl fast index - def friedelpairs(self,filename): + def friedelpairs(self, filename): """ Attempt to identify Freidel pairs Peaks must be assigned to the same powder ring Peaks will be the closest thing to being 180 degrees apart """ - out = open(filename,"w") - dsr=self.unitcell.ringds + out = open(filename, "w") + dsr = self.unitcell.ringds nring = len(dsr) - for j in range( nring ): - ind = np.compress(np.equal(self.ra,j), 
np.arange(self.ra.shape[0])) + for j in range(nring): + ind = np.compress(np.equal(self.ra, j), np.arange(self.ra.shape[0])) # ind is the indices of the ring assigment array - eg which hkl is this gv # - if len(ind)==0: + if len(ind) == 0: continue thesepeaks = self.gv[ind] # - h=self.unitcell.ringhkls[dsr[j]][0] + h = self.unitcell.ringhkls[dsr[j]][0] # - out.write("\n\n\n# h = %d \n"%(h[0])) - out.write("# k = %d \n"%(h[1])) - out.write("# l = %d \n"%(h[2])) - out.write("# npks = %d \n"%(thesepeaks.shape[0])) - out.write("# score eta1 omega1 tth1 gv1_x gv1_y gv1_z eta2 omega2 tth2 gv2_x gv2_y gv2_z\n") + out.write("\n\n\n# h = %d \n" % (h[0])) + out.write("# k = %d \n" % (h[1])) + out.write("# l = %d \n" % (h[2])) + out.write("# npks = %d \n" % (thesepeaks.shape[0])) + out.write( + "# score eta1 omega1 tth1 gv1_x gv1_y gv1_z eta2 omega2 tth2 gv2_x gv2_y gv2_z\n" + ) for k in range(thesepeaks.shape[0]): nearlyzero = thesepeaks + thesepeaks[k] - mag = np.sum(nearlyzero*nearlyzero,1) + mag = np.sum(nearlyzero * nearlyzero, 1) best = np.argmin(mag) if best > k: a = ind[k] - out.write("%f "%( np.sqrt(mag[best]) ) ) - out.write("%f %f %f %f %f %f "%(self.eta[a],self.omega[a],self.tth[a],self.gv[a][0],self.gv[a][1],self.gv[a][2])) + out.write("%f " % (np.sqrt(mag[best]))) + out.write( + "%f %f %f %f %f %f " + % ( + self.eta[a], + self.omega[a], + self.tth[a], + self.gv[a][0], + self.gv[a][1], + self.gv[a][2], + ) + ) a = ind[best] - out.write("%f %f %f %f %f %f "%(self.eta[a],self.omega[a],self.tth[a],self.gv[a][0],self.gv[a][1],self.gv[a][2])) + out.write( + "%f %f %f %f %f %f " + % ( + self.eta[a], + self.omega[a], + self.tth[a], + self.gv[a][0], + self.gv[a][1], + self.gv[a][2], + ) + ) out.write("\n") def score_all_pairs(self, n=None): @@ -480,17 +550,21 @@ def score_all_pairs(self, n=None): """ self.assigntorings() # Which rings have peaks assigned to them? - rings = [r for r in set( self.ra ) if r >= 0 ] + rings = [r for r in set(self.ra) if r >= 0] # What are the multiplicities of these rings? We will use low multiplicity first - mults = {r:len(self.unitcell.ringhkls[self.unitcell.ringds[r]]) for r in rings} + mults = {r: len(self.unitcell.ringhkls[self.unitcell.ringds[r]]) for r in rings} # How many peaks per ring? We will use the sparse rings first... # why? 
We assume these are the strongest peaks on a weak high angle ring # occupation = {r:self.na[r] for r in rings} - pairs = [(int(mults[r1]*mults[r2]),int(self.na[r1]*self.na[r2]),r1,r2) for r1 in rings for r2 in rings] + pairs = [ + (int(mults[r1] * mults[r2]), int(self.na[r1] * self.na[r2]), r1, r2) + for r1 in rings + for r2 in rings + ] pairs.sort() self.tried = 0 self.npairs = len(pairs) - self.stop=False + self.stop = False k = 0 for mu, oc, r1, r2 in pairs: k += 1 @@ -506,8 +580,13 @@ def score_all_pairs(self, n=None): break if n is not None and k > n: break - logging.info("Tried r1=%d r2=%d attempt %d of %d, got %d grains"%(r1,r2,self.tried,len(pairs),len(self.ubis))) - logging.info("\nTested",self.tried,"pairs and found",len(self.ubis),"grains so far") + logging.info( + "Tried r1=%d r2=%d attempt %d of %d, got %d grains" + % (r1, r2, self.tried, len(pairs), len(self.ubis)) + ) + logging.info( + "\nTested", self.tried, "pairs and found", len(self.ubis), "grains so far" + ) def find(self): """ @@ -519,10 +598,12 @@ def find(self): if self.ra is None: self.assigntorings() iall = np.arange(self.gv.shape[0]) - i1 = np.compress(np.logical_and(np.equal(self.ra,self.ring_1), - self.ga==-1 ) , iall).tolist() - i2 = np.compress(np.logical_and(np.equal(self.ra,self.ring_2), - self.ga==-1 ) , iall).tolist() + i1 = np.compress( + np.logical_and(np.equal(self.ra, self.ring_1), self.ga == -1), iall + ).tolist() + i2 = np.compress( + np.logical_and(np.equal(self.ra, self.ring_2), self.ga == -1), iall + ).tolist() if len(i1) == 0 or len(i2) == 0: logging.info("no peaks left for those rings") return @@ -530,79 +611,81 @@ def find(self): hkls1 = self.unitcell.ringhkls[self.unitcell.ringds[int(self.ring_1)]] hkls2 = self.unitcell.ringhkls[self.unitcell.ringds[int(self.ring_2)]] logging.info("hkls of rings being used for indexing") - logging.info("Ring 1: %s"%(str(hkls1))) - logging.info("Ring 2: %s"%(str(hkls2))) - cosangles=[] + logging.info("Ring 1: %s" % (str(hkls1))) + logging.info("Ring 2: %s" % (str(hkls2))) + cosangles = [] for h1 in hkls1: for h2 in hkls2: - ca=self.unitcell.anglehkls(h1,h2) + ca = self.unitcell.anglehkls(h1, h2) cosangles.append(ca[1]) cosangles.sort() - coses=[] - while len(cosangles)>0: - a=cosangles.pop() - if abs(a-1.)<1e-5 or abs(a+1.)<1e-5: # Throw out 180 degree angles + coses = [] + while len(cosangles) > 0: + a = cosangles.pop() + if ( + abs(a - 1.0) < 1e-5 or abs(a + 1.0) < 1e-5 + ): # Throw out 180 degree angles continue - if len(coses)==0: + if len(coses) == 0: coses.append(a) continue - if abs(coses[-1]-a) > 1e-5: + if abs(coses[-1] - a) > 1e-5: coses.append(a) logging.info("Possible angles and cosines between peaks in rings:") for c in coses: - logging.info("%.6f %.6f"%(math.acos(c)*180/math.pi,c)) + logging.info("%.6f %.6f" % (math.acos(c) * 180 / math.pi, c)) # # - logging.info("Number of peaks in ring 1: %d"%(len(i1))) - logging.info("Number of peaks in ring 2: %d"%(len(i2))) - logging.info("Minimum number of peaks to identify a grain %d"%(self.minpks)) + logging.info("Number of peaks in ring 1: %d" % (len(i1))) + logging.info("Number of peaks in ring 2: %d" % (len(i2))) + logging.info("Minimum number of peaks to identify a grain %d" % (self.minpks)) # print self.gv.shape # ntry=0 # nhits=0 - self.hits=[] - if len(i1)==0 or len(i2)==0: + self.hits = [] + if len(i1) == 0 or len(i2) == 0: # return without crashing please return - tol=float(self.cosine_tol) + tol = float(self.cosine_tol) # ng=0 - mp=np.sqrt(np.sum(self.gv*self.gv,1)) + mp = np.sqrt(np.sum(self.gv 
* self.gv, 1)) # print mp.shape - ps1 = np.take(self.gv,i1,0) - mp1 = np.take(mp,i1,0) + ps1 = np.take(self.gv, i1, 0) + mp1 = np.take(mp, i1, 0) n1 = ps1.copy() - ps2 = np.take(self.gv,i2,0) - mp2 = np.take(mp,i2,0) + ps2 = np.take(self.gv, i2, 0) + mp2 = np.take(mp, i2, 0) n2 = ps2.copy() # print "mp1.shape",mp1.shape # print "n1[:,1].shape",n1[:,1].shape for i in range(3): - n1[:,i]=n1[:,i]/mp1 - n2[:,i]=n2[:,i]/mp2 - cs = np.array(coses,'d') + n1[:, i] = n1[:, i] / mp1 + n2[:, i] = n2[:, i] / mp2 + cs = np.array(coses, "d") # found=0 - hits=[] + hits = [] start = time.time() self.cosangles = cs - mtol = -tol # Ugly interface - set cosine tolerance negative for all - # instead of best + mtol = -tol # Ugly interface - set cosine tolerance negative for all + # instead of best for i in range(len(i1)): - costheta=np.dot(n2,n1[i]) - if tol > 0: # This is the original algorithm - the closest angle - best,diff = cImageD11.closest(costheta,cs) + costheta = np.dot(n2, n1[i]) + if tol > 0: # This is the original algorithm - the closest angle + best, diff = cImageD11.closest(costheta, cs) if diff < tol: - hits.append( [ diff, i1[i], i2[best] ]) + hits.append([diff, i1[i], i2[best]]) else: for cval in cs: # 1d scalar 1d diff = cval - costheta - candidates = np.compress( abs(diff) < mtol, i2 ) + candidates = np.compress(abs(diff) < mtol, i2) for c in candidates: - hits.append( [ 0.0, i1[i], c ] ) - logging.info("Number of trial orientations generated %d"%(len(hits))) - logging.info("Time taken %.6f /s"%(time.time()-start)) - self.hits=hits + hits.append([0.0, i1[i], c]) + logging.info("Number of trial orientations generated %d" % (len(hits))) + logging.info("Time taken %.6f /s" % (time.time() - start)) + self.hits = hits - def histogram_drlv_fit(self,UBI=None,bins=None): + def histogram_drlv_fit(self, UBI=None, bins=None): """ Generate a histogram of |drlv| for a ubi matrix For use in validation of grains @@ -612,145 +695,171 @@ def histogram_drlv_fit(self,UBI=None,bins=None): else: ubilist = [UBI] if bins is None: - start=0.25 - fac=2 - bins=[start] + start = 0.25 + fac = 2 + bins = [start] while start > 1e-5: - start=start/fac + start = start / fac bins.append(start) bins.append(-start) bins.reverse() - bins=np.array(bins) - hist = np.zeros((len(ubilist),bins.shape[0]-1),int) - j=0 + bins = np.array(bins) + hist = np.zeros((len(ubilist), bins.shape[0] - 1), int) + j = 0 for UBI in ubilist: drlv2 = calc_drlv2(UBI, self.gv) - drlv = np.sort(np.sqrt(drlv2)) # always +ve - if drlv[-1]>0.866: - print("drlv of greater than 0.866!!!",drlv[-1]) - positions = np.searchsorted(drlv,bins) - hist[j,:] = positions[1:]-positions[:-1] - j=j+1 - self.bins=bins - self.histogram=hist + drlv = np.sort(np.sqrt(drlv2)) # always +ve + if drlv[-1] > 0.866: + print("drlv of greater than 0.866!!!", drlv[-1]) + positions = np.searchsorted(drlv, bins) + hist[j, :] = positions[1:] - positions[:-1] + j = j + 1 + self.bins = bins + self.histogram = hist def scorethem(self, fitb4=False): - """ decide which trials listed in hits to keep """ - start=time.time() - ng=0 - tol=float(self.hkl_tol) - gv=self.gvflat - all=len(self.hits) - logging.info("Scoring %d potential orientations"%(all)) - progress=0 - nuniq=0 + """decide which trials listed in hits to keep""" + start = time.time() + ng = 0 + tol = float(self.hkl_tol) + gv = self.gvflat + all = len(self.hits) + logging.info("Scoring %d potential orientations" % (all)) + # progress = 0 + nuniq = 0 # for getind mallocs - drlv2tmp = np.empty( len(self.gv), float ) - labelstmp = 
np.empty( len(self.gv), np.int32 )
+        drlv2tmp = np.empty(len(self.gv), float)
+        labelstmp = np.empty(len(self.gv), np.int32)
         while len(self.hits) > 0 and ng < self.max_grains:
-            diff,i,j = self.hits.pop()
-            if self.ga[i]>-1 or self.ga[j]>-1 or i==j:
+            diff, i, j = self.hits.pop()
+            if self.ga[i] > -1 or self.ga[j] > -1 or i == j:
                 # skip things which are already assigned or errors
                 continue
             try:
-                self.unitcell.orient(self.ring_1,
-                                     self.gv[i,:],
-                                     self.ring_2,
-                                     self.gv[j,:],
-                                     verbose=0)
+                self.unitcell.orient(
+                    self.ring_1, self.gv[i, :], self.ring_2, self.gv[j, :], verbose=0
+                )
             except:
-                logging.error(" ".join([str(x) for x in (i,j,self.ring_1,self.ring_2)]))
+                logging.error(
+                    " ".join([str(x) for x in (i, j, self.ring_1, self.ring_2)])
+                )
                 logging.error(str(self.gv[i]))
                 logging.error(str(self.gv[j]))
                 logging.error("Failed to find orientation in unitcell.orient")
                 raise
-            if fitb4: # FIXME : this does not work
-                self.unitcell.UBI = ubi_fit_2pks( self.unitell.UBI, self.gv[i,:], self.gv[j,:])
-            #npk = cImageD11.score(self.unitcell.UBI,gv,tol)
-            npk = self.score(self.unitcell.UBI,tol)
+            if fitb4:  # FIXME : this does not work
+                self.unitcell.UBI = ubi_fit_2pks(
+                    self.unitcell.UBI, self.gv[i, :], self.gv[j, :]
+                )
+            # npk = cImageD11.score(self.unitcell.UBI,gv,tol)
+            npk = self.score(self.unitcell.UBI, tol)
             UBI = self.unitcell.UBI.copy()
             if npk > self.minpks:
                 # Try to get a better orientation if we can...:
-                self.unitcell.orient(self.ring_1, self.gv[i,:], self.ring_2, self.gv[j,:],
-                                     verbose=0, crange=abs(self.cosine_tol))
+                self.unitcell.orient(
+                    self.ring_1,
+                    self.gv[i, :],
+                    self.ring_2,
+                    self.gv[j, :],
+                    verbose=0,
+                    crange=abs(self.cosine_tol),
+                )
                 if fitb4:
-                    for k in range( len(self.unitell.UBIlist) ):
-                        self.unitell.UBIlist[k] = ubi_fit_2pks( self.unitell.UBIlist[k],
-                                                        self.gv[i,:], self.gv[j,:])
+                    for k in range(len(self.unitcell.UBIlist)):
+                        self.unitcell.UBIlist[k] = ubi_fit_2pks(
+                            self.unitcell.UBIlist[k], self.gv[i, :], self.gv[j, :]
+                        )
                 if len(self.unitcell.UBIlist) > 1:
-                    npks=[self.score(UBItest, tol) for UBItest in self.unitcell.UBIlist]
+                    npks = [
+                        self.score(UBItest, tol) for UBItest in self.unitcell.UBIlist
+                    ]
                     choice = np.argmax(npks)
                     if npks[choice] >= npk:
                         UBI = self.unitcell.UBIlist[choice].copy()
                         npk = npks[choice]
-                _ = cImageD11.score_and_refine( UBI, gv, tol )
+                _ = cImageD11.score_and_refine(UBI, gv, tol)
                 # See if we already have this grain...
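                 # Review annotation, not part of the original patch: the
                 # try-block below applies a uniqueness test; condensed, the
                 # logic it implements is
                 #     ind = self.getind(UBI)              # peaks this UBI indexes
                 #     fraction_free = np.mean(self.ga[ind] == -1)
                 #     accept = fraction_free > self.uniqueness
                 # i.e. a trial grain is kept only if enough of its peaks are
                 # not already claimed by an earlier grain.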
try: - ind=self.getind(UBI, - drlv2tmp=drlv2tmp, - labelstmp=labelstmp, - ) # indices of peaks indexed - ga=self.ga[ind] # previous grain assignments - uniqueness=np.sum(np.where(ga==-1,1,0))*1.0/ga.shape[0] + ind = self.getind( + UBI, + drlv2tmp=drlv2tmp, + labelstmp=labelstmp, + ) # indices of peaks indexed + ga = self.ga[ind] # previous grain assignments + uniqueness = np.sum(np.where(ga == -1, 1, 0)) * 1.0 / ga.shape[0] if uniqueness > self.uniqueness: - self.ga[ind] = len(self.scores)+1 + self.ga[ind] = len(self.scores) + 1 self.ubis.append(UBI) self.scores.append(npk) - ubistr = (" %.6f"*9)%tuple(UBI.ravel()) - logging.info("new grain %d pks, i %d j %d UBI %s"%(npk,i,j,ubistr)) - ng=ng+1 + ubistr = (" %.6f" * 9) % tuple(UBI.ravel()) + logging.info( + "new grain %d pks, i %d j %d UBI %s" % (npk, i, j, ubistr) + ) + ng = ng + 1 else: - nuniq=nuniq+1 + nuniq = nuniq + 1 except: raise - logging.info("Number of orientations with more than %d peaks is %d"%(self.minpks,len(self.ubis))) - logging.info("Time taken %.3f/s"%(time.time()-start)) - if len(self.ubis)>0: - bestfitting=np.argmax(self.scores) - logging.info("UBI for best fitting\n%s"%(str(self.ubis[bestfitting]))) - logging.info("Unit cell: %s\n"%(str(ubitocellpars(self.ubis[bestfitting])))) - self.refine( self.ubis[bestfitting] ) - logging.info("Indexes %d peaks, with =%f"%(self.scorelastrefined,self.fitlastrefined)) + logging.info( + "Number of orientations with more than %d peaks is %d" + % (self.minpks, len(self.ubis)) + ) + logging.info("Time taken %.3f/s" % (time.time() - start)) + if len(self.ubis) > 0: + bestfitting = np.argmax(self.scores) + logging.info("UBI for best fitting\n%s" % (str(self.ubis[bestfitting]))) + logging.info( + "Unit cell: %s\n" % (str(ubitocellpars(self.ubis[bestfitting]))) + ) + self.refine(self.ubis[bestfitting]) + logging.info( + "Indexes %d peaks, with =%f" + % (self.scorelastrefined, self.fitlastrefined) + ) logging.info("That was the best thing I found so far") - notaccountedfor = ((self.ga < 0) & (self.ra >= 0)).sum() - logging.info("Number of peaks assigned to rings but not indexed = %d"%( notaccountedfor)) + notaccountedfor = ((self.ga < 0) & (self.ra >= 0)).sum() + logging.info( + "Number of peaks assigned to rings but not indexed = %d" + % (notaccountedfor) + ) else: - logging.info("Try again, either with larger tolerance or fewer minimum peaks") + logging.info( + "Try again, either with larger tolerance or fewer minimum peaks" + ) def fight_over_peaks(self): """ Get the best ubis from those proposed Use all peaks (ring assigned or not) """ - self.drlv2 = np.zeros( self.gv.shape[0], float)+2 - labels = np.ones( self.gv.shape[0], np.int32) - np.subtract(labels,2,labels) + self.drlv2 = np.zeros(self.gv.shape[0], float) + 2 + labels = np.ones(self.gv.shape[0], np.int32) + np.subtract(labels, 2, labels) i = -1 for ubi in self.ubis: i += 1 try: - npk = cImageD11.score_and_assign( ubi, self.gv, self.hkl_tol, - self.drlv2, labels, i) + _ = cImageD11.score_and_assign( + ubi, self.gv, self.hkl_tol, self.drlv2, labels, i + ) except: - print(ubi.shape) - print(self.gv.shape) - print(self.hkl_tol) - print(self.drlv2.shape) - print(labels.shape) - print("Error in fight_over_peaks",__file__) - raise + print(ubi.shape) + print(self.gv.shape) + print(self.hkl_tol) + print(self.drlv2.shape) + print(labels.shape) + print("Error in fight_over_peaks", __file__) + raise self.ga = labels # For each grain we want to know how many peaks it indexes # This is a histogram of labels - bins = np.arange(-0.5, 
len(self.ubis)-0.99) - hst = myhistogram( labels, bins ) + bins = np.arange(-0.5, len(self.ubis) - 0.99) + hst = myhistogram(labels, bins) self.gas = hst assert len(self.gas) == len(self.ubis) - - - def saveindexing(self, filename, tol = None): + def saveindexing(self, filename, tol=None): """ Save orientation matrices @@ -758,11 +867,11 @@ def saveindexing(self, filename, tol = None): grain by grain } peak by peak } """ - f = open(filename,"w") + f = open(filename, "w") i = 0 from ImageD11 import transform - self.gv = np.ascontiguousarray( self.gv ) + self.gv = np.ascontiguousarray(self.gv) # grain assignment self.fight_over_peaks() @@ -770,165 +879,179 @@ def saveindexing(self, filename, tol = None): # Printing per grain uinverses = [] allind = np.array(list(range(len(self.ra)))) - tthcalc =np.zeros(len(self.ra),float) - etacalc =np.zeros(len(self.ra),float) - omegacalc = np.zeros(len(self.ra),float) + tthcalc = np.zeros(len(self.ra), float) + etacalc = np.zeros(len(self.ra), float) + omegacalc = np.zeros(len(self.ra), float) i = -1 - for ubi in self.ubis: i += 1 # Each ubi has peaks in self.ga - uinverses.append( np.linalg.inv(ubi) ) + uinverses.append(np.linalg.inv(ubi)) # self.ga was filled in during fight_over_peaks - npk , mdrlv = cImageD11.refine_assigned( - ubi.copy(), - self.gv, - self.ga, - i) + npk, mdrlv = cImageD11.refine_assigned(ubi.copy(), self.gv, self.ga, i) assert npk == self.gas[i] - f.write("Grain: %d Npeaks=%d =%f\n"%( - i, self.gas[i], np.sqrt(mdrlv) )) - f.write("UBI:\n"+str(ubi)+"\n") + f.write( + "Grain: %d Npeaks=%d =%f\n" % (i, self.gas[i], np.sqrt(mdrlv)) + ) + f.write("UBI:\n" + str(ubi) + "\n") cellpars = ubitocellpars(ubi) f.write("Cell pars: ") - for abc in cellpars[:3]: f.write("%10.6f "%(abc)) - for abc in cellpars[3:]: f.write("%10.3f "%(abc)) + for abc in cellpars[:3]: + f.write("%10.6f " % (abc)) + for abc in cellpars[3:]: + f.write("%10.3f " % (abc)) f.write("\n") # Grainspotter U - f.write("U:\n"+str( ubitoU(ubi) ) + "\n") - f.write("B:\n"+str( ubitoB(ubi) ) + "\n") + f.write("U:\n" + str(ubitoU(ubi)) + "\n") + f.write("B:\n" + str(ubitoB(ubi)) + "\n") # Compute hkls - h = np.dot( ubi, self.gv.T ) - hint = np.floor( h + 0.5 ) - gint= np.dot( uinverses[-1], hint ) - dr = h - hint + h = np.dot(ubi, self.gv.T) + hint = np.floor(h + 0.5) + gint = np.dot(uinverses[-1], hint) + # dr = h - hint - f.write( - "Peak ( h k l ) drlv x y ") + f.write("Peak ( h k l ) drlv x y ") if self.wavelength < 0: f.write("\n") else: f.write( - " Omega_obs Omega_calc Eta_obs Eta_calc tth_obs tth_calc\n") + " Omega_obs Omega_calc Eta_obs Eta_calc tth_obs tth_calc\n" + ) - tc, ec, oc = transform.uncompute_g_vectors( - gint, - self.wavelength, - wedge = self.wedge) - ind = np.compress( self.ga == i, allind) + tc, ec, oc = transform.uncompute_g_vectors( + gint, self.wavelength, wedge=self.wedge + ) + ind = np.compress(self.ga == i, allind) for j in ind: - f.write("%-6d ( % 6.4f % 6.4f % 6.4f ) % 12.8f "%(j,h[0,j],h[1,j],h[2,j],np.sqrt(self.drlv2[j])) ) - f.write(" % 7.1f % 7.1f "%(self.xp[j],self.yp[j]) ) + f.write( + "%-6d ( % 6.4f % 6.4f % 6.4f ) % 12.8f " + % (j, h[0, j], h[1, j], h[2, j], np.sqrt(self.drlv2[j])) + ) + f.write(" % 7.1f % 7.1f " % (self.xp[j], self.yp[j])) if self.wavelength < 0: f.write("\n") else: # # # These should be equal to - to = math.asin( self.wavelength * self.ds[j]/2)*360/math.pi + to = math.asin(self.wavelength * self.ds[j] / 2) * 360 / math.pi # tth observed eo = mod_360(self.eta[j], 0) oo = self.omega[j] tc1 = tc[j] # Choose which is closest in 
eta/omega, # there are two choices, {eta,omega}, {-eta,omega+180} - w = np.argmin( [ abs(ec[0][j] - eo) , abs(ec[1][j] - eo) ] ) + w = np.argmin([abs(ec[0][j] - eo), abs(ec[1][j] - eo)]) ec1 = ec[w][j] oc1 = oc[w][j] # Now find best omega within 360 degree intervals oc1 = mod_360(oc1, oo) - f.write(" % 9.4f % 9.4f % 9.4f % 9.4f % 9.4f % 9.4f"% (oo,oc1, eo,ec1 ,to,tc1)) - etacalc[ j ] = ec1 - omegacalc[ j ] = oc1 - tthcalc[ j ] = tc1 - if self.ra[j] == -1 : + f.write( + " % 9.4f % 9.4f % 9.4f % 9.4f % 9.4f % 9.4f" + % (oo, oc1, eo, ec1, to, tc1) + ) + etacalc[j] = ec1 + omegacalc[j] = oc1 + tthcalc[j] = tc1 + if self.ra[j] == -1: f.write(" *** was not assigned to ring\n") else: f.write("\n") f.write("\n\n") # peaks assigned to rings - in_rings = np.compress(np.greater(self.ra,-1),np.arange(self.gv.shape[0])) + in_rings = np.compress(np.greater(self.ra, -1), np.arange(self.gv.shape[0])) f.write("\n\nAnd now listing via peaks which were assigned to rings\n") - nleft=0 - nfitted=0 + nleft = 0 + # nfitted = 0 npk = 0 for peak in in_rings: # Compute hkl for each grain - h = self.gv[peak,:] - f.write("\nPeak= %-5d Ring= %-5d gv=[ % -6.4f % -6.4f % -6.4f ] omega= % 9.4f eta= % 9.4f tth= % 9.4f\n"%(peak,self.ra[peak],h[0],h[1],h[2], - self.omega[peak],self.eta[peak],self.tth[peak])) + h = self.gv[peak, :] + f.write( + "\nPeak= %-5d Ring= %-5d gv=[ % -6.4f % -6.4f % -6.4f ] omega= % 9.4f eta= % 9.4f tth= % 9.4f\n" + % ( + peak, + self.ra[peak], + h[0], + h[1], + h[2], + self.omega[peak], + self.eta[peak], + self.tth[peak], + ) + ) if self.ga[peak] != -1: m = self.ga[peak] - hi = np.dot(self.ubis[m],h) - hint = np.floor(hi+0.5).astype(int) + hi = np.dot(self.ubis[m], h) + hint = np.floor(hi + 0.5).astype(int) gint = np.dot(uinverses[m], hint) - f.write("Grain %-5d (%3d,%3d,%3d)"%( m, - hint[0],hint[1],hint[2])) - f.write(" ( % -6.4f % -6.4f % -6.4f ) "%(hi[0],hi[1],hi[2])) + f.write("Grain %-5d (%3d,%3d,%3d)" % (m, hint[0], hint[1], hint[2])) + f.write(" ( % -6.4f % -6.4f % -6.4f ) " % (hi[0], hi[1], hi[2])) # Now find best omega within 360 degree intervals - f.write(" omega= % 9.4f eta= %9.4f tth= %9.4f\n"%( - omegacalc[peak],etacalc[peak],tthcalc[peak]) ) - npk=npk+1 + f.write( + " omega= % 9.4f eta= %9.4f tth= %9.4f\n" + % (omegacalc[peak], etacalc[peak], tthcalc[peak]) + ) + npk = npk + 1 else: - if len(self.ubis)>0: + if len(self.ubis) > 0: f.write("Peak not assigned\n") # , closest=[ % -6.4f % -6.4f % -6.4f ] for grain %d\n"%(hi[0],hi[1],hi[2],m)) else: f.write("Peak not assigned, no grains found\n") - nleft=nleft+1 + nleft = nleft + 1 - f.write("\n\nTotal number of peaks was %d\n"%(self.gv.shape[0])) - f.write("Peaks assigned to grains %d\n"%(npk)) - f.write("Peaks assigned to rings but remaining unindexed %d\n"%(nleft)) + f.write("\n\nTotal number of peaks was %d\n" % (self.gv.shape[0])) + f.write("Peaks assigned to grains %d\n" % (npk)) + f.write("Peaks assigned to rings but remaining unindexed %d\n" % (nleft)) - f.write("Peaks not assigned to rings at all %d\n"%(np.sum(np.where(self.ra==-1,1,0)))) + f.write( + "Peaks not assigned to rings at all %d\n" + % (np.sum(np.where(self.ra == -1, 1, 0))) + ) f.close() - - - - def getind(self,UBI,tol=None, drlv2tmp=None, labelstmp=None): + def getind(self, UBI, tol=None, drlv2tmp=None, labelstmp=None): """ Returns the indices of peaks in self.gv indexed by matrix UBI """ - if tol == None: + if tol is None: tol = self.hkl_tol ng = len(self.gvflat) if drlv2tmp is None: - drlv2 = np.ones( ng , float) + drlv2 = np.ones(ng, float) else: drlv2 = drlv2tmp 
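             # Review annotation, not part of the original patch: drlv2 holds,
             # per peak, the squared distance of the computed indices from the
             # nearest integers; with h = np.dot(UBI, g) for each g-vector,
             #     drlv2 = np.sum((h - np.round(h)) ** 2)
             # so a peak counts as indexed when sqrt(drlv2) < tol.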
drlv2[:] = 1 if labelstmp is None: - labels = np.zeros( ng, np.int32 ) + labels = np.zeros(ng, np.int32) else: labels = labelstmp labels[:] = 0 # we only use peaks assigned to rings for scoring here # already done in making gvflat in assigntorings try: - npk = cImageD11.score_and_assign( UBI, self.gv, tol, drlv2, labels, 1 ) + _ = cImageD11.score_and_assign(UBI, self.gv, tol, drlv2, labels, 1) except: - print(self.gvflat.shape) - print('ra',self.ra.shape) - print(drlv2.shape) - print(labels.shape) - print("logic error in getind") - raise + print(self.gvflat.shape) + print("ra", self.ra.shape) + print(drlv2.shape) + print(labels.shape) + print("logic error in getind") + raise return labels == 1 - - def score(self,UBI,tol=None): + def score(self, UBI, tol=None): """ Decide which are the best orientation matrices """ if tol is None: - return cImageD11.score(UBI,self.gv,self.hkl_tol) + return cImageD11.score(UBI, self.gv, self.hkl_tol) else: - return cImageD11.score(UBI,self.gv,tol) + return cImageD11.score(UBI, self.gv, tol) - def refine(self,UBI): + def refine(self, UBI): """ Refine an orientation matrix and rescore it. @@ -940,70 +1063,75 @@ def refine(self,UBI): r = g-vectors h = hkl indices """ -# print "Orientation and unit cell refinement of" -# print "UBI\n",UBI -# print "Scores before",self.score(UBI) + # print "Orientation and unit cell refinement of" + # print "UBI\n",UBI + # print "Scores before",self.score(UBI) # Need to find hkl indices for all of the peaks which are indexed - drlv2=calc_drlv2(UBI,self.gv) + drlv2 = calc_drlv2(UBI, self.gv) h = np.dot(UBI, np.transpose(self.gv)) - hint = np.floor(h + 0.5).astype(int) # rounds down + hint = np.floor(h + 0.5).astype(int) # rounds down tol = float(self.hkl_tol) - tol = tol*tol + tol = tol * tol # Only use peaks which are assigned to rings for refinement - ind = np.compress( np.logical_and(np.less(drlv2,tol),np.greater(self.ra,-1)) , np.arange(self.gv.shape[0]) ) - #scoreb4=ind.shape[0] + ind = np.compress( + np.logical_and(np.less(drlv2, tol), np.greater(self.ra, -1)), + np.arange(self.gv.shape[0]), + ) + # scoreb4=ind.shape[0] contribs = drlv2[ind] - if len(contribs)==0: - raise Exception("No contributing reflections for"+str(UBI)) - #try: + if len(contribs) == 0: + raise Exception("No contributing reflections for" + str(UBI)) + # try: # fitb4=sum(contribs)/contribs.shape[0] - #except: + # except: # print "No contributing reflections for\n",UBI # raise - #drlv2_old=drlv2 - R=np.zeros((3,3),float) - H=np.zeros((3,3),float) + # drlv2_old=drlv2 + R = np.zeros((3, 3), float) + H = np.zeros((3, 3), float) for i in ind: - r = self.gv[i,:] - k = hint[:,i].astype(float) -# print r,k - R = R + np.outer(r,k) - H = H + np.outer(k,k) + r = self.gv[i, :] + k = hint[:, i].astype(float) + # print r,k + R = R + np.outer(r, k) + H = H + np.outer(k, k) try: - HI=np.linalg.inv(H) - UBoptimal=np.dot(R,HI) - UBIo=np.linalg.inv(UBoptimal) + HI = np.linalg.inv(H) + UBoptimal = np.dot(R, HI) + UBIo = np.linalg.inv(UBoptimal) except: # A singular matrix - this sucks. 
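             # Review annotation, not part of the original patch: the loop
             # above accumulates the normal equations of
             #     min over UB of sum_i | g_i - UB . h_i | ** 2
             # whose solution is UB = R . H^-1, with R = sum_i outer(g_i, h_i)
             # and H = sum_i outer(h_i, h_i). A vectorised equivalent (same
             # names as above) would be
             #     R = self.gv[ind].T @ hint[:, ind].T
             #     H = (hint[:, ind] @ hint[:, ind].T).astype(float)
             # When H is singular, the unrefined UBI is kept, as below.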
- UBIo=UBI - drlv2 = calc_drlv2(UBIo,self.gv) - ind = np.compress( np.logical_and(np.less(drlv2,tol),np.greater(self.ra,-1)), np.arange(self.gv.shape[0]) ) - self.scorelastrefined=ind.shape[0] + UBIo = UBI + drlv2 = calc_drlv2(UBIo, self.gv) + ind = np.compress( + np.logical_and(np.less(drlv2, tol), np.greater(self.ra, -1)), + np.arange(self.gv.shape[0]), + ) + self.scorelastrefined = ind.shape[0] contribs = drlv2[ind] try: - self.fitlastrefined=math.sqrt(np.sum(contribs)/contribs.shape[0]) + self.fitlastrefined = math.sqrt(np.sum(contribs) / contribs.shape[0]) except: print("\n\n\n") - print("No contributing reflections for\n",UBI) + print("No contributing reflections for\n", UBI) print("After refinement, it was OK before ???") print("\n\n\n") raise -# for i in ind: -# print "( %-6.4f %-6.4f %-6.4f ) %12.8f %12.8f"%(h[0,i],h[1,i],h[2,i],sqrt(drlv2[i]),sqrt(drlv2_old[i])) -# print UBIo -# print "Scores after", self.score(UBIo,self.hkl_tol) -# print "diff\n",UBI-UBIo -# print "Mean drlv now",sum(sqrt(drlv2))/drlv2.shape[0], -# print "Mean drlv old",sum(sqrt(drlv2_old))/drlv2_old.shape[0] + # for i in ind: + # print "( %-6.4f %-6.4f %-6.4f ) %12.8f %12.8f"%(h[0,i],h[1,i],h[2,i],sqrt(drlv2[i]),sqrt(drlv2_old[i])) + # print UBIo + # print "Scores after", self.score(UBIo,self.hkl_tol) + # print "diff\n",UBI-UBIo + # print "Mean drlv now",sum(sqrt(drlv2))/drlv2.shape[0], + # print "Mean drlv old",sum(sqrt(drlv2_old))/drlv2_old.shape[0] return UBIo - def saveubis(self,filename): + def saveubis(self, filename): """ Save the generated ubi matrices into a text file """ write_ubi_file(filename, self.ubis) - def coverage(self): """ Compute the expected coverage of reciprocal space @@ -1012,41 +1140,40 @@ def coverage(self): """ pass - - def readgvfile(self,filename, quiet=False): - f=open(filename,"r") + def readgvfile(self, filename, quiet=False): + f = open(filename, "r") # Lattice!!! 
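         # Review annotation, not part of the original patch: the parser below
         # expects a unit cell on the first line, then "#" header lines. An
         # invented example for orientation (real files are written by
         # ImageD11 itself, so column names may differ):
         #     2.87 2.87 2.87 90.0 90.0 90.0 I
         #     # wavelength = 0.2952
         #     # wedge = 0.0
         #     # ds h k l
         #     # xr yr zr xc yc ds eta omega
         # The first line holds the unit cell, read immediately below.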
self.unitcell = unitcell.cellfromstring(f.readline()) while 1: - line=f.readline() - if line[0]=="#": - if line.find("wavelength")>-1: + line = f.readline() + if line[0] == "#": + if line.find("wavelength") > -1: self.wavelength = float(line.split()[-1]) if not quiet: - print("Got wavelength from gv file of ",self.wavelength) + print("Got wavelength from gv file of ", self.wavelength) continue - if line.find("wedge")>-1: + if line.find("wedge") > -1: self.wedge = float(line.split()[-1]) if not quiet: - print("Got wedge from gv file of ",self.wedge) + print("Got wedge from gv file of ", self.wedge) continue - if line.find("ds h k l")>-1: - continue # reads up to comment line - if line.find("omega")>-1 and line.find("xc")>-1: + if line.find("ds h k l") > -1: + continue # reads up to comment line + if line.find("omega") > -1 and line.find("xc") > -1: break - self.eta=[] # Raw peak information - self.omega=[] - self.ds=[] - self.xr=[] - self.yr=[] - self.zr=[] - self.xp=[] - self.yp=[] - self.omega_ranges=[] + self.eta = [] # Raw peak information + self.omega = [] + self.ds = [] + self.xr = [] + self.yr = [] + self.zr = [] + self.xp = [] + self.yp = [] + self.omega_ranges = [] self.omega_fullrange = 0 try: for line in f.readlines(): - v=[float(x) for x in line.split()] + v = [float(x) for x in line.split()] if len(v) == 0: # Skip the blank lines continue @@ -1061,40 +1188,53 @@ def readgvfile(self,filename, quiet=False): self.eta.append(v[6]) self.omega.append(v[7]) except: - print("LINE:",line) + print("LINE:", line) raise -# raise "Problem interpreting the last thing I printed" + # raise "Problem interpreting the last thing I printed" f.close() - self.ds = np.array( self.ds ) - self.omega = np.array( self.omega ) - sorted_omega = np.unique( np.round( self.omega.copy() ).astype(int) ) + self.ds = np.array(self.ds) + self.omega = np.array(self.omega) + sorted_omega = np.unique(np.round(self.omega.copy()).astype(int)) sorted_omega.sort() - self.omega_ranges.append( sorted_omega[0] ) - domega = sorted_omega[1:] - sorted_omega[:-1] - for i in range(len(sorted_omega)-1): - if sorted_omega[i+1]-sorted_omega[i] > 10: + self.omega_ranges.append(sorted_omega[0]) + # domega = sorted_omega[1:] - sorted_omega[:-1] + for i in range(len(sorted_omega) - 1): + if sorted_omega[i + 1] - sorted_omega[i] > 10: self.omega_ranges.append(sorted_omega[i]) - self.omega_ranges.append(sorted_omega[i+1]) + self.omega_ranges.append(sorted_omega[i + 1]) self.omega_ranges.append(sorted_omega[-1]) if not quiet: - print('\nGot %d sets of omega values from gv file:'%(len(self.omega_ranges)/2)) - for i in range(len(self.omega_ranges)//2): # integer division intended here - self.omega_fullrange += self.omega_ranges[2*i+1]-self.omega_ranges[2*i] + print( + "\nGot %d sets of omega values from gv file:" + % (len(self.omega_ranges) / 2) + ) + for i in range(len(self.omega_ranges) // 2): # integer division intended here + self.omega_fullrange += ( + self.omega_ranges[2 * i + 1] - self.omega_ranges[2 * i] + ) if not quiet: - print('- Range %1d from %4d to %4d ==> %3d degrees'%(i+1, \ - self.omega_ranges[2*i], self.omega_ranges[2*i+1], \ - self.omega_ranges[2*i+1]-self.omega_ranges[2*i])) + print( + "- Range %1d from %4d to %4d ==> %3d degrees" + % ( + i + 1, + self.omega_ranges[2 * i], + self.omega_ranges[2 * i + 1], + self.omega_ranges[2 * i + 1] - self.omega_ranges[2 * i], + ) + ) if self.wavelength > 0: - self.tth=np.arcsin(np.array(self.ds)*self.wavelength/2)*360/math.pi + self.tth = ( + np.arcsin(np.array(self.ds) * 
self.wavelength / 2) * 360 / math.pi + ) else: - self.tth=np.zeros(len(self.ds)) - self.gv=np.transpose(np.array( [ self.xr , self.yr, self.zr ] ,float)) + self.tth = np.zeros(len(self.ds)) + self.gv = np.transpose(np.array([self.xr, self.yr, self.zr], float)) self.allgv = self.gv.copy() - self.ga=np.zeros(len(self.ds),np.int32)-1 # Grain assignments + self.ga = np.zeros(len(self.ds), np.int32) - 1 # Grain assignments - self.gvflat=np.ascontiguousarray(self.gv,float) + self.gvflat = np.ascontiguousarray(self.gv, float) self.gv = self.gvflat.copy() # Makes it contiguous in memory, hkl fast index if not quiet: - print("Read your gv file containing",self.gv.shape) + print("Read your gv file containing", self.gv.shape) diff --git a/ImageD11/labelimage.py b/ImageD11/labelimage.py index 750e4d28..371ec926 100644 --- a/ImageD11/labelimage.py +++ b/ImageD11/labelimage.py @@ -1,4 +1,3 @@ - from __future__ import print_function """ @@ -7,7 +6,6 @@ """ - # ImageD11_v1.0 Software for beamline ID11 # Copyright (C) 2005-2007 Jon Wright # @@ -27,16 +25,38 @@ from ImageD11 import blobcorrector, cImageD11 -# Names of property columns in array -from ImageD11.cImageD11 import s_1, s_I, s_I2,\ - s_fI, s_ffI, s_sI, s_ssI, s_sfI, s_oI, s_ooI, s_foI, s_soI, \ - bb_mn_f, bb_mn_s, bb_mx_f, bb_mx_s, bb_mn_o, bb_mx_o, \ - mx_I, mx_I_f, mx_I_s, mx_I_o, dety, detz, \ - avg_i, f_raw, s_raw, o_raw, f_cen, s_cen, \ - m_ss, m_ff, m_oo, m_sf, m_so, m_fo +# Names of property columns in array +from ImageD11.cImageD11 import ( + s_1, + s_I, + s_I2, + bb_mn_f, + bb_mn_s, + bb_mx_f, + bb_mx_s, + bb_mn_o, + bb_mx_o, + mx_I, + mx_I_f, + mx_I_s, + mx_I_o, + dety, + detz, + avg_i, + f_raw, + s_raw, + o_raw, + f_cen, + s_cen, + m_ss, + m_ff, + m_oo, + m_sf, + m_so, + m_fo, +) -from math import sqrt import sys @@ -46,32 +66,43 @@ # These should match the definitions in # /sware/exp/saxs/doc/SaxsKeywords.pdf def flip1(x, y): - """ fast, slow to dety, detz""" - return x, y + """fast, slow to dety, detz""" + return x, y + + def flip2(x, y): - """ fast, slow to dety, detz""" - return -x, y + """fast, slow to dety, detz""" + return -x, y + + def flip3(x, y): - """ fast, slow to dety, detz""" - return x, -y + """fast, slow to dety, detz""" + return x, -y + + def flip4(x, y): - """ fast, slow to dety, detz""" + """fast, slow to dety, detz""" return -x, -y + + def flip5(x, y): - """ fast, slow to dety, detz""" - return y, x + """fast, slow to dety, detz""" + return y, x + + def flip6(x, y): - """ fast, slow to dety, detz""" - return y, -x -def flip7(x, y): - """ fast, slow to dety, detz""" - return -y, x -def flip8(x, y): - """ fast, slow to dety, detz""" - return -y, -x + """fast, slow to dety, detz""" + return y, -x +def flip7(x, y): + """fast, slow to dety, detz""" + return -y, x + +def flip8(x, y): + """fast, slow to dety, detz""" + return -y, -x class labelimage: @@ -80,76 +111,73 @@ class labelimage: """ titles = "# sc fc omega" - format = " %.4f"*3 + format = " %.4f" * 3 titles += " Number_of_pixels" format += " %.0f" titles += " avg_intensity s_raw f_raw sigs sigf covsf" - format += " %.4f"*6 + format += " %.4f" * 6 titles += " sigo covso covfo" - format += " %.4f"*3 + format += " %.4f" * 3 titles += " sum_intensity sum_intensity^2" format += " %.4f %.4f" titles += " IMax_int IMax_s IMax_f IMax_o" format += " %.4f %.0f %.0f %.4f" titles += " Min_s Max_s Min_f Max_f Min_o Max_o" - format += " %.0f"*4 + " %.4f"*2 + format += " %.0f" * 4 + " %.4f" * 2 titles += " dety detz" - format += " %.4f"*2 + format += " %.4f" * 2 titles += " 
onfirst onlast spot3d_id" format += " %d %d %d" titles += "\n" format += "\n" - - def __init__(self, - shape, - fileout = sys.stdout, - spatial = blobcorrector.perfect(), - flipper = flip2, - sptfile = sys.stdout ): + def __init__( + self, + shape, + fileout=sys.stdout, + spatial=blobcorrector.perfect(), + flipper=flip2, + sptfile=sys.stdout, + ): """ Shape - image dimensions fileout - writeable stream for merged peaks spatial - correction of of peak positions """ self.shape = shape # Array shape - if not hasattr(sptfile,"write"): + if not hasattr(sptfile, "write"): self.sptfile = open(sptfile, "w") else: - self.sptfile = sptfile # place for peaksearch to print - file object + self.sptfile = sptfile # place for peaksearch to print - file object self.corrector = spatial # applies spatial distortion - self.fs2yz = flipper # generates y/z + self.fs2yz = flipper # generates y/z - self.onfirst = 1 # Flag for first image in series - self.onlast = 0 # Flag for last image in series + self.onfirst = 1 # Flag for first image in series + self.onlast = 0 # Flag for last image in series self.blim = np.zeros(shape, np.int32) # 'current' blob image - self.npk = 0 # Number of peaks on current - self.res = None # properties of current + self.npk = 0 # Number of peaks on current + self.res = None # properties of current - self.threshold = None # cache for writing files + self.threshold = None # cache for writing files - self.lastbl = np.zeros(shape, np.int32)# 'previous' blob image + self.lastbl = np.zeros(shape, np.int32) # 'previous' blob image self.lastres = None - self.lastnp = "FIRST" # Flags initial state - - self.verbose = 0 # For debugging + self.lastnp = "FIRST" # Flags initial state + self.verbose = 0 # For debugging - if hasattr(fileout,"write"): + if hasattr(fileout, "write"): self.outfile = fileout else: - self.outfile = open(fileout,"w") + self.outfile = open(fileout, "w") - self.spot3d_id = 0 # counter for printing + self.spot3d_id = 0 # counter for printing try: self.outfile.write(self.titles) except: - print(type(self.outfile),self.outfile) + print(type(self.outfile), self.outfile) raise - - - def peaksearch(self, data, threshold, omega): """ # Call the c extensions to do the peaksearch, on entry: @@ -158,31 +186,26 @@ def peaksearch(self, data, threshold, omega): # threshold = float - pixels above this number are put into objects """ d = data.astype(np.float32) - self.labelpeaks( d, threshold ) - self.measurepeaks( d, omega ) - + self.labelpeaks(d, threshold) + self.measurepeaks(d, omega) def labelpeaks(self, data, threshold): self.threshold = threshold - self.npk = cImageD11.connectedpixels(data.astype(np.float32), - self.blim, - threshold, - self.verbose) + self.npk = cImageD11.connectedpixels( + data.astype(np.float32), self.blim, threshold, self.verbose + ) def measurepeaks(self, data, omega, blim=None): if blim is not None: self.npk = blim.max() self.blim = blim if self.npk > 0: - self.res = cImageD11.blobproperties(data.astype(np.float32), - self.blim, - self.npk, - omega=omega) + self.res = cImageD11.blobproperties( + data.astype(np.float32), self.blim, self.npk, omega=omega + ) else: # What to do? 
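             # Review annotation, not part of the original patch: for context,
             # a sketch of how this class is typically driven, assuming frames
             # yields (data, omega) pairs and threshold is a float:
             #     lio = labelimage(shape, fileout="peaks.flt")
             #     for data, omega in frames:
             #         lio.peaksearch(data, threshold, omega)
             #         lio.mergelast()  # merge blobs touching the previous frame
             #     lio.finalise()       # flush peaks still open on the last frame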
self.res = None - - def mergelast(self): """ @@ -198,23 +221,25 @@ def mergelast(self): if self.npk > 0 and self.lastnp > 0: # Thanks to Stine West for finding a bug here # - self.npk = cImageD11.bloboverlaps(self.lastbl, - self.lastnp, - self.lastres, - self.blim, - self.npk, - self.res, - self.verbose) + self.npk = cImageD11.bloboverlaps( + self.lastbl, + self.lastnp, + self.lastres, + self.blim, + self.npk, + self.res, + self.verbose, + ) if self.lastnp > 0: # Fill out the moments of the "closed" peaks # print "calling blobmoments with",self.lastres - cImageD11.blob_moments(self.lastres[:self.lastnp]) + cImageD11.blob_moments(self.lastres[: self.lastnp]) # Write them to file - self.outputpeaks(self.lastres[:self.lastnp]) + self.outputpeaks(self.lastres[: self.lastnp]) # lastres is now moved forward into res - self.lastnp = self.npk # This is array dim + self.lastnp = self.npk # This is array dim if self.npk > 0: - self.lastres = self.res[:self.npk] # free old lastres I hope + self.lastres = self.res[: self.npk] # free old lastres I hope else: self.lastres = None # Also swap the blob images @@ -227,18 +252,32 @@ def output2dpeaks(self, file_obj): This is called before mergelast, so we write self.npk/self.res """ - file_obj.write("# Threshold level %f\n"%( self.threshold)) - file_obj.write("# Number_of_pixels Average_counts s f sc fc sig_s sig_f cov_sf IMax_int\n") + file_obj.write("# Threshold level %f\n" % (self.threshold)) + file_obj.write( + "# Number_of_pixels Average_counts s f sc fc sig_s sig_f cov_sf IMax_int\n" + ) cImageD11.blob_moments(self.res) - fs = "%d "+ "%f "*9 + "\n" - for i in self.res[:self.npk]: + fs = "%d " + "%f " * 9 + "\n" + for i in self.res[: self.npk]: if i[s_1] < 0.1: raise Exception("Empty peak on current frame") i[s_cen], i[f_cen] = self.corrector.correct(i[s_raw], i[f_raw]) - file_obj.write(fs % (i[s_1], i[avg_i], i[s_raw], i[f_raw], - i[s_cen], i[f_cen], - i[m_ss], i[m_ff], i[m_sf], i[mx_I])) + file_obj.write( + fs + % ( + i[s_1], + i[avg_i], + i[s_raw], + i[f_raw], + i[s_cen], + i[f_cen], + i[m_ss], + i[m_ff], + i[m_sf], + i[mx_I], + ) + ) file_obj.write("\n") def outputpeaks(self, peaks): @@ -253,25 +292,45 @@ def outputpeaks(self, peaks): # Spline correction i[s_cen], i[f_cen] = self.corrector.correct(i[s_raw], i[f_raw]) i[dety], i[detz] = self.fs2yz(i[f_raw], i[s_raw]) - self.outfile.write(self.format % ( - i[s_cen], i[f_cen], i[o_raw], - i[s_1], i[avg_i], - i[s_raw], i[f_raw], - i[m_ss], i[m_ff], i[m_sf], - i[m_oo], i[m_so], i[m_fo], - i[s_I],i[s_I2], - i[mx_I],i[mx_I_s],i[mx_I_f],i[mx_I_o], - i[bb_mn_s],i[bb_mx_s],i[bb_mn_f],i[bb_mx_f], - i[bb_mn_o],i[bb_mx_o], - i[dety], i[detz], - self.onfirst, self.onlast, self.spot3d_id )) + self.outfile.write( + self.format + % ( + i[s_cen], + i[f_cen], + i[o_raw], + i[s_1], + i[avg_i], + i[s_raw], + i[f_raw], + i[m_ss], + i[m_ff], + i[m_sf], + i[m_oo], + i[m_so], + i[m_fo], + i[s_I], + i[s_I2], + i[mx_I], + i[mx_I_s], + i[mx_I_f], + i[mx_I_o], + i[bb_mn_s], + i[bb_mx_s], + i[bb_mn_f], + i[bb_mx_f], + i[bb_mn_o], + i[bb_mx_o], + i[dety], + i[detz], + self.onfirst, + self.onlast, + self.spot3d_id, + ) + ) self.spot3d_id += 1 if self.onfirst > 0: self.onfirst = 0 - - - def finalise(self): """ Write out the last frame @@ -280,9 +339,6 @@ def finalise(self): if self.lastres is not None: cImageD11.blob_moments(self.lastres) self.outputpeaks(self.lastres) - #if hasattr(self.sptfile, "close"): + # if hasattr(self.sptfile, "close"): # self.sptfile.close() # wonder what that does to stdout - - - diff --git 
a/ImageD11/lattice_reduction.py b/ImageD11/lattice_reduction.py index 65fb451c..160a2f72 100644 --- a/ImageD11/lattice_reduction.py +++ b/ImageD11/lattice_reduction.py @@ -1,25 +1,38 @@ - from __future__ import print_function from .rc_array import rc_array -from numpy import dot, round_, array, allclose, asarray, fabs,\ - argmin, argmax, sqrt, argsort, take, sum, where, ndarray, eye,\ - zeros, cross, pi, arccos, floor +from numpy import ( + dot, + round_, + array, + allclose, + asarray, + fabs, + argmax, + sqrt, + sum, + where, + eye, + zeros, + cross, + pi, + arccos, + floor, +) from numpy.linalg import inv, LinAlgError import logging # Confirm that dot'ting a 3x3 matrix with a 3x10 gives a 3x10 -assert dot(eye(3), zeros( (3, 10) ) ).shape == (3, 10), \ - "Numpy dot insanity problem" - +assert dot(eye(3), zeros((3, 10))).shape == (3, 10), "Numpy dot insanity problem" + # It is unclear why it is not a 10x3 result (row/col vectors) try: - dot(eye(3), zeros( (10, 3) ) ) + dot(eye(3), zeros((10, 3))) raise Exception("Numpy dot insanity problem") -except ValueError: +except ValueError: pass except: print("Unexpected exception when checking numpy behaviour") @@ -28,44 +41,46 @@ DEBUG = False # Some sort of round off -MIN_VEC2 = 1e-9*1e-9 - +MIN_VEC2 = 1e-9 * 1e-9 def fparl(x, y): - """ fraction of x parallel to y """ - ly2 = dot(1.0*y ,y) + """fraction of x parallel to y""" + ly2 = dot(1.0 * y, y) if ly2 > 1e-9: - return dot(1.0*x, y) / ly2 + return dot(1.0 * x, y) / ly2 return 0 -def mod(x,y): - """ + +def mod(x, y): + """ Returns the part of x with integer multiples of y removed - assert that ||mod(x,y)|| <= ||x|| for all y + assert that ||mod(x,y)|| <= ||x|| for all y """ - if __debug__: - b4 = dot(x,x) - n = round_(fparl(x,y)) + if __debug__: + b4 = dot(x, x) + n = round_(fparl(x, y)) ret = x - n * y if __debug__: - af = dot(ret,ret) - if b4 < af and n != 0 : - print("Bad mod "+str(x) + " " + str(y)) + af = dot(ret, ret) + if b4 < af and n != 0: + print("Bad mod " + str(x) + " " + str(y)) print(ret, b4, af, n) raise Exception("problem in mod") return ret -def sortvec_len( vl ): + +def sortvec_len(vl): """ Sorts according to length (d-s-u) """ # Here v is ALWAYS a shape==(3,) vector - ul = [ ( dot(v,v), tuple(v)) for v in vl ] + ul = [(dot(v, v), tuple(v)) for v in vl] ul.sort() - return asarray([ v[1] for v in ul[::-1] ]) + return asarray([v[1] for v in ul[::-1]]) -def sortvec_xyz( vl ): + +def sortvec_xyz(vl): """ For each choose between x, -x Sorts according to x then y then z components @@ -74,31 +89,35 @@ def sortvec_xyz( vl ): # Choose x/-x for v in vl: # Wonder how to do this with numpy? 
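        # Review annotation, not part of the original patch: one untested
        # numpy answer to the question above - flipping v so that its first
        # nonzero component is positive reproduces max(tuple(v), tuple(-v))
        # for any nonzero v:
        #     i = argmax(fabs(v) > 0)  # index of first nonzero component
        #     v = v if v[i] > 0 else -asarray(v)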
- ul.append( max( tuple(v),tuple(-asarray(v)) ) ) + ul.append(max(tuple(v), tuple(-asarray(v)))) # Choose x,y,z ul.sort() - return [ asarray(v) for v in ul[::-1] ] + return [asarray(v) for v in ul[::-1]] + class BadVectors(Exception): - """ Raised by lattice class when vectors are coplanar or colinear""" + """Raised by lattice class when vectors are coplanar or colinear""" + pass + def checkvol(v1, v2, v3, min_vec2=MIN_VEC2): - """ Check whether vectors are singular """ - v = dot(v1, cross(v2,v3)) - assert abs(v)> pow(min_vec2, 1.5), "Volume problem" + """Check whether vectors are singular""" + v = dot(v1, cross(v2, v3)) + assert abs(v) > pow(min_vec2, 1.5), "Volume problem" return True -def rsweep( vl ): - """ - One sweep subtracting each from other two - This idea comes from Knuth TAOCP sec 3.3.4C + +def rsweep(vl): + """ + One sweep subtracting each from other two + This idea comes from Knuth TAOCP sec 3.3.4C """ vn = asarray(vl).copy() for i in range(3): - for j in range(i+1,i+3): - k = j%3 - assert i!=k + for j in range(i + 1, i + 3): + k = j % 3 + assert i != k vn[k] = mod(vn[k], vn[i]) return vn @@ -107,19 +126,19 @@ def reduce(v1, v2, v3, min_vec2=MIN_VEC2): """ Try to find a reduced lattice basis of v1, v2, v3 """ - assert checkvol(v1,v2,v3,min_vec2) + assert checkvol(v1, v2, v3, min_vec2) vl = array([v1, v2, v3]) vn = rsweep(vl) i = 0 - while not allclose(vn ,vl) : - vl = [ v.copy() for v in vn ] - vn = rsweep( vl ) + while not allclose(vn, vl): + vl = [v.copy() for v in vn] + vn = rsweep(vl) i += 1 if i > 10: raise Exception("Algorithmic flaw") # choose the "bigger" compared to -v for i in range(3): - vn[i] = sortvec_xyz( [vn[i], -vn[i]] )[0] + vn[i] = sortvec_xyz([vn[i], -vn[i]])[0] return vn @@ -129,7 +148,7 @@ class lattice(object): """ def __init__(self, v1, v2, v3, direction=None, min_vec2=MIN_VEC2): - """ Make the lattice + """Make the lattice Currently v1, v2, v3 are vectors - which gives 3D direction [ 'row' | 'col' ] - if none read from v1, v2, v3 ... means they are row direction lattice vectors or measured scattering @@ -150,15 +169,15 @@ def __init__(self, v1, v2, v3, direction=None, min_vec2=MIN_VEC2): | ------------------- | [ ] [ [ col2x col2y col2z ] ] [ gz ] - This matrix of column vectors is often called + This matrix of column vectors is often called or ... h.T = dot(r2c, g.T) ... since h is column vector - appears here as a row vecot ... Note that numpy.dot transposes its result - eg: s = (3,4) ; + eg: s = (3,4) ; assert dot(zeros( (3,3) ), zeros( s )).shape == s - This is a double head screwing up error. - + This is a double head screwing up error. + [ gx gy gz ] = [ ------- ------- ------- [ hx ] [ [ row0x ] [ row1y ] [ row2z ] ] [ ] [ | row0y | [ row1y ] [ row2z ] ] [ hy ] @@ -167,100 +186,107 @@ def __init__(self, v1, v2, v3, direction=None, min_vec2=MIN_VEC2): ... 
due to the double head screwing (dot transposes its result) we then have to transpose this too - - + + """ if direction is None: - assert hasattr(v1, 'direction') - assert hasattr(v2, 'direction') - assert hasattr(v3, 'direction') + assert hasattr(v1, "direction") + assert hasattr(v2, "direction") + assert hasattr(v3, "direction") assert v1.direction == v2.direction assert v1.direction == v3.direction direction = v1.direction # Direction is irrelevant for reduction to shortest 3 try: - vl = reduce( v1 , v2, v3, min_vec2 ) - again = reduce( vl[0], vl[1], vl[2], min_vec2 ) + vl = reduce(v1, v2, v3, min_vec2) + again = reduce(vl[0], vl[1], vl[2], min_vec2) except: raise BadVectors() # Check reduction is stable # print vl - assert allclose( array(vl),array(again) ), "Bad reduction %s %s"%( - str(vl), str(again)) + assert allclose(array(vl), array(again)), "Bad reduction %s %s" % ( + str(vl), + str(again), + ) # This cause a problem - why? # vl = sortvec_len( vl ) try: - if direction == 'col': + if direction == "col": # print "Supplied col direction vectors" - self.r2c = array(vl) + self.r2c = array(vl) self.c2r = inv(self.r2c) - elif direction == 'row': + elif direction == "row": # Supplied with g-vectors # print "Supplied row direction vectors" self.c2r = array(vl).T - self.r2c = inv(self.c2r) + self.r2c = inv(self.c2r) else: - raise Exception("Direction must be row or col "+str(direction)) + raise Exception("Direction must be row or col " + str(direction)) except LinAlgError: print("problem with vectors") - print(v1,v2,v3) + print(v1, v2, v3) print("Reduced to") print(vl) raise assert self.c2r.shape == (3, 3) assert self.r2c.shape == (3, 3) - + def flip(self, v): """ See also __init__.__doc__ """ assert v.check() ret = v.flip(self.matrix(v.direction)) - assert isinstance( ret, rc_array ) + assert isinstance(ret, rc_array) assert ret.check() - assert ret.shape == v.shape[::-1], "Shape mismatch, %s %s %s %s"%( - str(v.shape[::-1]),str(v.shape), str(ret.shape), v.direction) + assert ret.shape == v.shape[::-1], "Shape mismatch, %s %s %s %s" % ( + str(v.shape[::-1]), + str(v.shape), + str(ret.shape), + v.direction, + ) return ret def matrix(self, direction): - if direction == 'row': return self.r2c - if direction == 'col': return self.c2r + if direction == "row": + return self.r2c + if direction == "col": + return self.c2r raise Exception("direction not in row|col") - def nearest(self, vecs): - """ Give back the nearest lattice point indices, - in the same direction """ + """Give back the nearest lattice point indices, + in the same direction""" new_vecs = vecs.flip(self.matrix(vecs.direction)) int_vecs = round_(new_vecs) return int_vecs.flip(self.matrix(int_vecs.direction)) - + def remainders(self, vecs): - """ Difference between vecs and closest lattice points """ + """Difference between vecs and closest lattice points""" vecs.check() return vecs - self.nearest(vecs) def withvec(self, x, direction="col"): - """ - Try to fit x into the lattice + """ + Try to fit x into the lattice Make the remainder together with current vectors Index it as hkl indices whichever vector has the biggest projection is replaced remake the lattice with these 3 vectors """ - assert hasattr(x, 'direction') - r = self.remainders( x ) + assert hasattr(x, "direction") + r = self.remainders(x) worst = argmax(fabs(r)) - if r.direction == 'col': + if r.direction == "col": # supplied vector is from a patterson v = list(self.r2c) - if r.direction == 'row': + if r.direction == "row": # supplied vector is a g-vector v = 
list(self.c2r.T) - v[worst]=r - l_new = lattice( v[0], v[1], v[2] , direction=r.direction ) + v[worst] = r + l_new = lattice(v[0], v[1], v[2], direction=r.direction) return l_new def score(self, vecs, tol=0.1, debug=False): @@ -272,12 +298,12 @@ def score(self, vecs, tol=0.1, debug=False): diffs = self.remainders(vecs) # Put into other space to compare to tol # ... works for g-vectors as hkl - int_err = diffs.flip( self.matrix( diffs.direction ) ) + int_err = diffs.flip(self.matrix(diffs.direction)) r2 = int_err.norm2() if debug: - print(vecs.shape, r2.shape, tol*tol) + print(vecs.shape, r2.shape, tol * tol) print(r2[:10]) - s = sum( where( r2 < tol * tol, 1, 0) ) + s = sum(where(r2 < tol * tol, 1, 0)) return s @@ -287,9 +313,10 @@ def iter3d_old(n): for i,j,k < n """ for i in range(n): - for j in range(i+1, n): - for k in range(j+1, n): - yield i,j,k + for j in range(i + 1, n): + for k in range(j + 1, n): + yield i, j, k + def iter3d(n): """ @@ -298,29 +325,32 @@ def iter3d(n): This looping was rewritten thanks to: TAOCP V4 fascicle 3, section 7.2.1.3. - + It gives much nicer ordering than previously, as is gradually expands down the list, instead of hammering the start """ - for k in range(2,n): - for j in range(1,k): + for k in range(2, n): + for j in range(1, k): for i in range(j): - yield i,j,k - -#t1 = [ l for l in iter3d_old(10) ] -#t2 = [ l for l in iter3d(10) ] -#print t1 -#print t2 -#assert t1 == t2 - - -def find_lattice(vecs, - min_vec2=1, - n_try=None, - test_vecs = None, - tol = 0.1, - fraction_indexed=0.9, - noisy = False ): + yield i, j, k + + +# t1 = [ l for l in iter3d_old(10) ] +# t2 = [ l for l in iter3d(10) ] +# print t1 +# print t2 +# assert t1 == t2 + + +def find_lattice( + vecs, + min_vec2=1, + n_try=None, + test_vecs=None, + tol=0.1, + fraction_indexed=0.9, + noisy=False, +): """ vecs - vectors to use to generate the lattice min_vec2 - squared length of min_vec (units as vec) @@ -334,59 +364,69 @@ def find_lattice(vecs, else: if n_try > vecs.nvectors(): n_try = vecs.nvectors() - logging.warning("Adjusting number of trial vectors to %d"%(n_try)) + logging.warning("Adjusting number of trial vectors to %d" % (n_try)) if test_vecs is None: test_vecs = vecs assert isinstance(test_vecs, rc_array) gen_dir = vecs[0].direction if noisy: - print("Finding with dir",gen_dir) - for i,v in enumerate(vecs): + print("Finding with dir", gen_dir) + for i, v in enumerate(vecs): print(i, v) if i > n_try: break - print("min_vec2",min_vec2) - for i,j,k in iter3d(n_try): + print("min_vec2", min_vec2) + for i, j, k in iter3d(n_try): # if (i,j,k) == (0,1,6): # print vecs[i],vecs[j],vecs[k] # print gen_dir, min_vec2 if noisy: - print("Try",i,j,k, end=' ') + print("Try", i, j, k, end=" ") try: - if gen_dir == 'row': - if dot(vecs[i], vecs[i]) < min_vec2: continue - if dot(vecs[j], vecs[j]) < min_vec2: continue - if dot(vecs[k], vecs[k]) < min_vec2: continue - print(i,j,k, end=' ') - l = lattice(vecs[i], vecs[j], vecs[k], - direction = gen_dir, - min_vec2 = min_vec2) - elif gen_dir == 'col': + if gen_dir == "row": + if dot(vecs[i], vecs[i]) < min_vec2: + continue + if dot(vecs[j], vecs[j]) < min_vec2: + continue + if dot(vecs[k], vecs[k]) < min_vec2: + continue + print(i, j, k, end=" ") + l = lattice( + vecs[i], vecs[j], vecs[k], direction=gen_dir, min_vec2=min_vec2 + ) + elif gen_dir == "col": try: - if dot(vecs[:,i], vecs[:,i]) < min_vec2: continue - if dot(vecs[:,j], vecs[:,j]) < min_vec2: continue - if dot(vecs[:,k], vecs[:,k]) < min_vec2: continue + if dot(vecs[:, i], vecs[:, i]) < 
min_vec2: + continue + if dot(vecs[:, j], vecs[:, j]) < min_vec2: + continue + if dot(vecs[:, k], vecs[:, k]) < min_vec2: + continue # print i,j,k,dot(vecs[:,i], vecs[:,i]),dot(vecs[:,j], vecs[:,j]),dot(vecs[:,k], vecs[:,k]) - l = lattice(vecs[:,i], vecs[:,j], vecs[:,k], - direction = gen_dir, - min_vec2 = min_vec2) + l = lattice( + vecs[:, i], + vecs[:, j], + vecs[:, k], + direction=gen_dir, + min_vec2=min_vec2, + ) except IndexError: - print(i,j,k,n_try,vecs.shape) + print(i, j, k, n_try, vecs.shape) raise - + else: raise Exception("Logical impossibility") # First test on vecs - - scor = l.score( vecs, tol ) + + scor = l.score(vecs, tol) frac = 1.0 * scor / vecs.nvectors() if noisy: - print("Score on vecs",scor,frac, end=' ') - - scor = l.score( test_vecs, tol ) + print("Score on vecs", scor, frac, end=" ") + + scor = l.score(test_vecs, tol) frac = 1.0 * scor / test_vecs.nvectors() if noisy: - print("score on test_vecs",scor,frac) + print("score on test_vecs", scor, frac) if frac > fraction_indexed: if noisy: print("Returning") @@ -396,62 +436,69 @@ def find_lattice(vecs, return None -def cosangle_vec( ubi, v ): +def cosangle_vec(ubi, v): """ Angle between v in real and reciprocal space eg, is a* parallel to a or not? """ - real = dot( ubi.T, v ) - reci = dot( inv(ubi) , v ) - return dot( real, reci )/sqrt( - dot(real, real) * dot(reci, reci) ) + real = dot(ubi.T, v) + reci = dot(inv(ubi), v) + return dot(real, reci) / sqrt(dot(real, real) * dot(reci, reci)) -def search_2folds( ubi ): +def search_2folds(ubi): """ Inspired by the Yvon Lepage's method for finding lattice symmetry Check for 2 fold axes by measuring the directions between real and reciprocal vectors with the same indices. In the case of 2 fold axes they should be parallel """ - hr = list(range(-2,3)) + hr = list(range(-2, 3)) for h in hr: for k in hr: for l in hr: - if h==0 and k==0 and l==0: + if h == 0 and k == 0 and l == 0: continue - c = cosangle_vec( ubi, [h,k,l] ) - if abs(c - floor( c + 0.5)) < 0.001: - print(h, k, l, c, arccos(c)*180/pi) - + c = cosangle_vec(ubi, [h, k, l]) + if abs(c - floor(c + 0.5)) < 0.001: + print(h, k, l, c, arccos(c) * 180 / pi) def get_options(parser): - parser.add_argument('-v', '--min_vec2', - action='store', - type=float, - dest="min_vec2", - help='Minimum axis length ^2, (angstrom^2) [1.5]', - default = 1.5) - parser.add_argument('-m', '--n_try', - action='store', - type=int, - dest="n_try", - default=None, - help='Number of vectors to test in finding lattice [all]') - parser.add_argument('-f', '--fraction_indexed', - action='store', - type=float, - dest="fraction_indexed", - default=0.9, - help='Fraction of peaks to be indexed') - parser.add_argument('-t','--tol', - action='store', - type=float, - default = 0.1, - dest="tol", - help='tolerance in hkl error for indexing') + parser.add_argument( + "-v", + "--min_vec2", + action="store", + type=float, + dest="min_vec2", + help="Minimum axis length ^2, (angstrom^2) [1.5]", + default=1.5, + ) + parser.add_argument( + "-m", + "--n_try", + action="store", + type=int, + dest="n_try", + default=None, + help="Number of vectors to test in finding lattice [all]", + ) + parser.add_argument( + "-f", + "--fraction_indexed", + action="store", + type=float, + dest="fraction_indexed", + default=0.9, + help="Fraction of peaks to be indexed", + ) + parser.add_argument( + "-t", + "--tol", + action="store", + type=float, + default=0.1, + dest="tol", + help="tolerance in hkl error for indexing", + ) return parser - - - diff --git a/ImageD11/license.py 
b/ImageD11/license.py
index a1413aa3..060d9fbd 100644
--- a/ImageD11/license.py
+++ b/ImageD11/license.py
@@ -327,4 +327,3 @@ library.  If this is what you want to do, use the GNU Library General
 Public License instead of this License.
 """
-
diff --git a/ImageD11/nbGui/__init__.py b/ImageD11/nbGui/__init__.py
index 72024a5c..df15461f 100644
--- a/ImageD11/nbGui/__init__.py
+++ b/ImageD11/nbGui/__init__.py
@@ -1 +1 @@
-""" nbGui """
\ No newline at end of file
+""" nbGui """
diff --git a/ImageD11/nbGui/fit_geometry.py b/ImageD11/nbGui/fit_geometry.py
index 9fc90b34..147cb5a3 100644
--- a/ImageD11/nbGui/fit_geometry.py
+++ b/ImageD11/nbGui/fit_geometry.py
@@ -5,110 +5,122 @@
 
 
 class FitGeom(transformer.transformer):
-    
+
     "Gives an IPython notebook UI for the experimental calibrations"
 
     try:
-        if get_ipython().__class__.__name__ == 'ZMQInteractiveShell':
+        if get_ipython().__class__.__name__ == "ZMQInteractiveShell":
             interactive = True
         else:
             interactive = False
     except NameError:
         interactive = False
-    
+
     def __init__(self):
        transformer.transformer.__init__(self)
         self.vars = "y_center z_center distance tilt_y tilt_z".split()
-        self.steps = (1, 1, 100, 0.01, 0.01, 0)        
+        self.steps = (1, 1, 100, 0.01, 0.01, 0)
         self.nv = len(self.vars)
 
-    def __parCallBack( self, arg ):
-        self.parameterobj.parameters.update( { arg['owner'].description : arg['new'] } )
+    def __parCallBack(self, arg):
+        self.parameterobj.parameters.update({arg["owner"].description: arg["new"]})
         self.__drawPlot()
 
-    def __fixVaryCallBack( self, arg ):
-        name = arg['owner'].description.split(" ")[1]
+    def __fixVaryCallBack(self, arg):
+        name = arg["owner"].description.split(" ")[1]
         vars = self.getvars()
-        if arg.new and not (name in self.vars):
-            vars.append( name )
-        if name in self.vars and not arg.new:
-            vars.remove(name)
+        if arg.new and (name not in vars):
+            vars.append(name)
+        if (name in vars) and not arg.new:
+            vars.remove(name)
         self.parameterobj.set_varylist(vars)
 
     def __fitCallBack(self, arg):
-        """ fit call back - runs fit """
+        """fit call back - runs fit"""
         lo, hi = self.ax1.get_xlim()
-        self.fit( lo, hi )
+        self.fit(lo, hi)
         self.__updateGui()
 
     def __drawPlot(self):
         tth, eta = self.compute_tth_eta()
-        self.pt1.set_data( tth, eta )
+        self.pt1.set_data(tth, eta)
 
     def __loadCallBack(self, arg):
         rootTk = Tk()
         rootTk.withdraw()
-        rootTk.call('wm', 'attributes', '.', '-topmost', True)
+        rootTk.call("wm", "attributes", ".", "-topmost", True)
         filename = filedialog.askopenfilename()
-        get_ipython().run_line_magic('gui', 'tk')
-        if not filename == '':
+        get_ipython().run_line_magic("gui", "tk")
+        if not filename == "":
             self.loadfileparameters(filename)
             self.__updateGui()
 
     def __saveCallBack(self, arg):
         rootTk = Tk()
         rootTk.withdraw()
-        rootTk.call('wm', 'attributes', '.', '-topmost', True)
+        rootTk.call("wm", "attributes", ".", "-topmost", True)
         filename = filedialog.asksaveasfilename()
-        get_ipython().run_line_magic('gui', 'tk')
-        if not filename == '':
+        get_ipython().run_line_magic("gui", "tk")
+        if not filename == "":
             self.parameterobj.saveparameters(filename)
 
     def __updateGui(self):
         for i, pname in enumerate(self.vars):
-            self.layout[i,0].value = self.parameterobj.get(pname)
+            self.layout[i, 0].value = self.parameterobj.get(pname)
         self.__drawPlot()
-    
+
     def __drawWidgets(self):
         nv = self.nv
         vars = self.vars
         steps = self.steps
-        self.layout = ipywidgets.GridspecLayout(nv+1,3)
-        for i,( pname, pstep ) in enumerate( zip( vars, steps ) ) :
-            self.layout[i,0] = ipywidgets.FloatText( description=pname,
-                    value = self.parameterobj.parameters.get(pname),
-                    step=pstep)
-            self.layout[i,0].observe( 
self.__parCallBack , names='value' ) - self.layout[i,1] = ipywidgets.ToggleButton( description="Vary "+pname, - value = pname in self.getvars() ) - self.layout[i,1].observe( self.__fixVaryCallBack, names='value' ) - - self.layout[nv,0] = ipywidgets.FloatText( description='fit_tolerance', - value = self.parameterobj.parameters.get("fit_tolerance"), step=0,) - self.layout[nv,0].observe( self.__parCallBack , names='value' ) - - self.layout[nv,1] = ipywidgets.Button(description="Run Fit (blocks)") - self.layout[nv,1].on_click( self.__fitCallBack ) - - self.layout[0,2] = ipywidgets.Button(description="Load parameters") - self.layout[0,2].on_click( self.__loadCallBack ) - - self.layout[1,2] = ipywidgets.Button(description="Save to a file") - self.layout[1,2].on_click( self.__saveCallBack ) + self.layout = ipywidgets.GridspecLayout(nv + 1, 3) + for i, (pname, pstep) in enumerate(zip(vars, steps)): + self.layout[i, 0] = ipywidgets.FloatText( + description=pname, + value=self.parameterobj.parameters.get(pname), + step=pstep, + ) + self.layout[i, 0].observe(self.__parCallBack, names="value") + self.layout[i, 1] = ipywidgets.ToggleButton( + description="Vary " + pname, value=pname in self.getvars() + ) + self.layout[i, 1].observe(self.__fixVaryCallBack, names="value") + + self.layout[nv, 0] = ipywidgets.FloatText( + description="fit_tolerance", + value=self.parameterobj.parameters.get("fit_tolerance"), + step=0, + ) + self.layout[nv, 0].observe(self.__parCallBack, names="value") + + self.layout[nv, 1] = ipywidgets.Button(description="Run Fit (blocks)") + self.layout[nv, 1].on_click(self.__fitCallBack) + + self.layout[0, 2] = ipywidgets.Button(description="Load parameters") + self.layout[0, 2].on_click(self.__loadCallBack) + + self.layout[1, 2] = ipywidgets.Button(description="Save to a file") + self.layout[1, 2].on_click(self.__saveCallBack) def fitGui(self): if self.__class__.interactive: - self.fig1 = pl.figure(1, figsize=(9,6)) + self.fig1 = pl.figure(1, figsize=(9, 6)) self.ax1 = self.fig1.add_subplot() tth, eta = self.compute_tth_eta() self.addcellpeaks() - self.pt1, = self.ax1.plot( tth, eta, ",") + (self.pt1,) = self.ax1.plot(tth, eta, ",") self.ax1.set(xlabel="tth", ylabel="eta") - self.ax1.plot( self.theorytth, [0,]*len(self.theorytth), "r|", ms=360, alpha=0.2 ) + self.ax1.plot( + self.theorytth, + [ + 0, + ] + * len(self.theorytth), + "r|", + ms=360, + alpha=0.2, + ) # Add controls self.__drawWidgets() display(self.layout) else: - print('Sorry, this Gui works only in IPython notebooks!') - - - + print("Sorry, this Gui works only in IPython notebooks!") diff --git a/ImageD11/nbGui/plot3d.py b/ImageD11/nbGui/plot3d.py index 8de3b360..5f3e48f7 100644 --- a/ImageD11/nbGui/plot3d.py +++ b/ImageD11/nbGui/plot3d.py @@ -1,4 +1,3 @@ - import io import numpy as np import ipywidgets @@ -6,39 +5,69 @@ from ImageD11 import cImageD11 from PIL import Image + def demodata(): - h,k,l = np.mgrid[-3:4,-3:4,-3:4] - gve = np.dot( np.eye(3)*0.1 , (h.ravel(), k.ravel(), l.ravel() )) + h, k, l = np.mgrid[-3:4, -3:4, -3:4] + gve = np.dot(np.eye(3) * 0.1, (h.ravel(), k.ravel(), l.ravel())) return gve + class Plot3d: - - def __init__(self, xyz=demodata(), w=386, h=256, rx=0., ry=0., rz=0., npx = 1 ): - self.rgba = np.empty( (h, w, 4), 'B') + def __init__(self, xyz=demodata(), w=386, h=256, rx=0.0, ry=0.0, rz=0.0, npx=1): + self.rgba = np.empty((h, w, 4), "B") self.xyz = xyz self.npx = npx - self.ipyimg = ipywidgets.Image( ) - self.wrx = ipywidgets.FloatSlider( value=rx, min=-360, max=360.0, step=1, description='rx:', 
disabled=False, - continuous_update=True, orientation='vertical', readout=True, readout_format='.1f' ) - self.wry = ipywidgets.FloatSlider( value=ry, min=-360, max=360.0, step=1, description='ry:', disabled=False, - continuous_update=True, orientation='vertical', readout=True, readout_format='.1f' ) - self.wrz = ipywidgets.FloatSlider( value=rz, min=-360, max=360.0, step=1, description='rz:', disabled=False, - continuous_update=True, orientation='vertical', readout=True, readout_format='.1f' ) - self.wrx.observe( self.redraw, names='value' ) - self.wry.observe( self.redraw, names='value' ) - self.wrz.observe( self.redraw, names='value' ) + self.ipyimg = ipywidgets.Image() + self.wrx = ipywidgets.FloatSlider( + value=rx, + min=-360, + max=360.0, + step=1, + description="rx:", + disabled=False, + continuous_update=True, + orientation="vertical", + readout=True, + readout_format=".1f", + ) + self.wry = ipywidgets.FloatSlider( + value=ry, + min=-360, + max=360.0, + step=1, + description="ry:", + disabled=False, + continuous_update=True, + orientation="vertical", + readout=True, + readout_format=".1f", + ) + self.wrz = ipywidgets.FloatSlider( + value=rz, + min=-360, + max=360.0, + step=1, + description="rz:", + disabled=False, + continuous_update=True, + orientation="vertical", + readout=True, + readout_format=".1f", + ) + self.wrx.observe(self.redraw, names="value") + self.wry.observe(self.redraw, names="value") + self.wrz.observe(self.redraw, names="value") self.redraw(None) - self.widget = ipywidgets.HBox([ self.ipyimg, self.wrx, self.wry, self.wrz] ) - - def redraw(self,change): - u = scipy.spatial.transform.Rotation.from_euler('XYZ', - (self.wrx.value, self.wry.value, self.wrz.value), - degrees=True).as_matrix() + self.widget = ipywidgets.HBox([self.ipyimg, self.wrx, self.wry, self.wrz]) + + def redraw(self, change): + u = scipy.spatial.transform.Rotation.from_euler( + "XYZ", (self.wrx.value, self.wry.value, self.wrz.value), degrees=True + ).as_matrix() rotated = u.dot(self.xyz) - order = np.argsort( (rotated[2]*100).astype(np.int16) ) - cImageD11.splat( self.rgba, rotated[:,order].T, u.ravel(), self.npx ) + order = np.argsort((rotated[2] * 100).astype(np.int16)) + cImageD11.splat(self.rgba, rotated[:, order].T, u.ravel(), self.npx) img = Image.fromarray(self.rgba) with io.BytesIO() as buffer: - img.save( buffer, format='gif' ) + img.save(buffer, format="gif") self.ipyimg.value = buffer.getvalue() - diff --git a/ImageD11/peakmerge.py b/ImageD11/peakmerge.py index 3e387ef7..f3ce1d98 100644 --- a/ImageD11/peakmerge.py +++ b/ImageD11/peakmerge.py @@ -1,4 +1,3 @@ - from __future__ import print_function # ImageD11_v0.4 Software for beamline ID11 @@ -36,18 +35,21 @@ import logging -def roundfloat(x,tol): +def roundfloat(x, tol): """ Return the float nearest to x stepsize tol """ - return x.__divmod__(tol)[0]*tol + return x.__divmod__(tol)[0] * tol + # print "Using omega tolerance of 1e-5" + class peak: """ Represents a peak """ + def __init__(self, line, omega, threshold, num, tolerance): """ line = line in the outputfile from peaksearch @@ -55,9 +57,9 @@ def __init__(self, line, omega, threshold, num, tolerance): threshold = the threshold level when the peak was found tolerance = the distance (pixels) between peak centres for merging """ - self.TOLERANCE = tolerance # Pixel separation for combining peaks + self.TOLERANCE = tolerance # Pixel separation for combining peaks self.omegatol = 1e-5 - self.omega = roundfloat(omega, self.omegatol) # round to nearest + self.omega = roundfloat(omega, 
self.omegatol) # round to nearest self.num = num self.line = line self.threshold = threshold @@ -73,7 +75,6 @@ def __init__(self, line, omega, threshold, num, tolerance): self.covxy = v[8] self.forgotten = False - def combine(self, other): """ Combine this peak with another peak (eg merge!) @@ -96,29 +97,43 @@ def combine(self, other): threshold = self.threshold s = self.avg * self.np + other.avg * other.np avg = s / np - omega = (self.omega * self.np * self.avg + - other.omega * other.np * other.avg) / s - num = (self.num * self.np * self.avg + - other.num * other.np * other.avg) / s - x = (self.x * self.np * self.avg + - other.x * other.np * other.avg) / s - y = (self.y * self.np * self.avg + - other.y * other.np * other.avg) / s - xc = (self.xc * self.np * self.avg + - other.xc * other.np * other.avg) / s - yc = (self.yc * self.np * self.avg + - other.yc * other.np * other.avg) / s - sigx = (self.sigx * self.np * self.avg + - other.sigx * other.np * other.avg) / s - sigy = (self.sigy * self.np * self.avg + - other.sigy * other.np * other.avg) / s - covxy = (self.covxy * self.np * self.avg + - other.covxy * other.np * other.avg) / s - self.forgotten=True - other.forgotten=True + omega = ( + self.omega * self.np * self.avg + other.omega * other.np * other.avg + ) / s + num = ( + self.num * self.np * self.avg + other.num * other.np * other.avg + ) / s + x = (self.x * self.np * self.avg + other.x * other.np * other.avg) / s + y = (self.y * self.np * self.avg + other.y * other.np * other.avg) / s + xc = ( + self.xc * self.np * self.avg + other.xc * other.np * other.avg + ) / s + yc = ( + self.yc * self.np * self.avg + other.yc * other.np * other.avg + ) / s + sigx = ( + self.sigx * self.np * self.avg + other.sigx * other.np * other.avg + ) / s + sigy = ( + self.sigy * self.np * self.avg + other.sigy * other.np * other.avg + ) / s + covxy = ( + self.covxy * self.np * self.avg + other.covxy * other.np * other.avg + ) / s + self.forgotten = True + other.forgotten = True # Make a new line - line = "%d %f %f %f %f %f %f %f %f"% ( - np, avg, x, y, xc, yc, sigx, sigy, covxy) + line = "%d %f %f %f %f %f %f %f %f" % ( + np, + avg, + x, + y, + xc, + yc, + sigx, + sigy, + covxy, + ) return peak(line, omega, threshold, num, self.TOLERANCE) def __cmp__(self, other): @@ -148,9 +163,11 @@ def __eq__(self, other): For deciding if peaks overlap """ try: -# print "using __eq__" - if abs(self.xc - other.xc) < self.TOLERANCE and \ - abs(self.yc - other.yc) < self.TOLERANCE : + # print "using __eq__" + if ( + abs(self.xc - other.xc) < self.TOLERANCE + and abs(self.yc - other.yc) < self.TOLERANCE + ): return True else: return False @@ -162,21 +179,23 @@ def __str__(self): """ for printing something """ - return "Peak xc=%f yc=%f omega=%f"% (self.xc, self.yc, self.omega) + return "Peak xc=%f yc=%f omega=%f" % (self.xc, self.yc, self.omega) def __repr__(self): """ Not sure that this is used, again, for printing """ - return "Peak xc=%f yc=%f omega=%f"% (self.xc, self.yc, self.omega) + return "Peak xc=%f yc=%f omega=%f" % (self.xc, self.yc, self.omega) + class pkimage: """ Contains the header information from an image Also pointers to positions of lines in peaksearch output file """ + def __init__(self, name): - """ name is the filename """ + """name is the filename""" self.name = name self.header = {} self.header["File"] = name @@ -192,12 +211,12 @@ def addtoheader(self, headerline): h, v = headerline[1:].split("=") self.header[h.lstrip().rstrip()] = v if h.lstrip().rstrip() == "ANGLES": - # Got the Bruker angles - 
# map them to ImageD11 geometry + # Got the Bruker angles + # map them to ImageD11 geometry vals = v.split() self.header["TWOTHETA"] = vals[0] self.header["THETA"] = vals[1] - self.header["Omega"] = vals[2] # sorry + self.header["Omega"] = vals[2] # sorry self.header["CHI"] = vals[3] return else: @@ -218,12 +237,11 @@ def otherheaderstuff(self, headerline): if headerline.find("Number_of_pixels") > 0: return if headerline.find("Frame") > 0: - self.header['Frame'] = int(headerline.split()[-1]) + self.header["Frame"] = int(headerline.split()[-1]) return - else: # No equals sign means a threshold level or titles - logging.critical("Could not interpret %s"% (headerline)) - raise Exception("Cannot interpret %s"%(headerline)) - + else: # No equals sign means a threshold level or titles + logging.critical("Could not interpret %s" % (headerline)) + raise Exception("Cannot interpret %s" % (headerline)) class peakmerger: @@ -231,13 +249,14 @@ class peakmerger: The useful class - called by the gui to process peaksearch output into spots """ - def __init__(self, quiet = "No"): + + def __init__(self, quiet="No"): """ object to read in peaksearch output file and merge the peaks quiet arg decides if we spray stdout """ self.quiet = quiet - #print "I am in quiet mode",quiet + # print "I am in quiet mode",quiet self.lines = None self.allpeaks = None self.merged = None @@ -248,23 +267,23 @@ def __init__(self, quiet = "No"): self.omegas = None self.images = None - def setpixeltolerance(self, tolerance = 2): - """ TODO ah... if only """ + def setpixeltolerance(self, tolerance=2): + """TODO ah... if only""" pass - def readpeaks(self, filename, startom = 0., omstep = 1.): + def readpeaks(self, filename, startom=0.0, omstep=1.0): """ Read in the output from the peaksearching script Filename is the output file optionally startom and omstep fill in omega ONLY if missing in file """ - self.lines = open(filename,"r").readlines() + self.lines = open(filename, "r").readlines() # Get a list of filenames, omega angles self.images = [] i = -1 for line in self.lines: i += 1 -# print line[0:5] + # print line[0:5] if line[0:6] == "# File": name = line.split()[-1] if name.find("[0]") > 0: @@ -280,20 +299,20 @@ def readpeaks(self, filename, startom = 0., omstep = 1.): currentimage.addtoheader(line) continue self.imagenumbers = np.zeros(len(self.images), int) - self.omegas = np.zeros(len(self.images), float) + self.omegas = np.zeros(len(self.images), float) i = 0 while i < len(self.images): self.imagenumbers[i] = int(self.images[i].imagenumber) try: - self.omegas[i] = float(self.images[i].header["Omega"]) + self.omegas[i] = float(self.images[i].header["Omega"]) except (KeyError, ValueError): # Oh dear, you have no numerical Omega in your header # om = startom + i * omstep self.images[i].header["Omega"] = om self.omegas[i] = om - i=i+1 - logging.info("Found "+str(len(self.images))+" images") + i = i + 1 + logging.info("Found " + str(len(self.images)) + " images") def getheaderkeys(self): """ @@ -301,93 +320,111 @@ def getheaderkeys(self): """ return list(self.images[0].header.keys()) - def getheaderinfo(self,key): + def getheaderinfo(self, key): """ try to find "key" in the headers and return it """ ret = [] - for im in self.images: + for im in self.images: try: ret.append(im.header[key]) except: - raise Exception("key %s not found"%(key)) + raise Exception("key %s not found" % (key)) try: - # make into numbers if possible + # make into numbers if possible fret = [float(v) for v in ret] return fret except ValueError: # otherwise 
strings
             return ret
-
-
-
-    def harvestpeaks(self, numlim = None, omlim = None, thresholds = None):
+    def harvestpeaks(self, numlim=None, omlim=None, thresholds=None):
         """
         Harvests the peaks from the images
         within a range of imagenumbers and/or omega
         eg: it actually reads the numbers now
         """
         # Check we have read in a file already
-        if self.lines == None:
+        if self.lines is None:
             # we have not read the file yet!
-            raise Exception(
-                "You need to read in a peaksearch output file first!")
+            raise Exception("You need to read in a peaksearch output file first!")
         start_harvest = time.time()
         peaks = []
         # We now have the ranges in imagenumber and omega if requested
         if numlim is None:
-            numinrange = lambda x : True
+
+            def numinrange(x):
+                return True
+
         else:
-            numinrange = lambda x : x > min(numlim) and x < max(numlim)
+
+            def numinrange(x):
+                return x > min(numlim) and x < max(numlim)
+
         if omlim is None:
-            ominrange = lambda x : True
+
+            def ominrange(x):
+                return True
+
         else:
-            ominrange = lambda x : x > min(omlim) and x < max(omlim)
+
+            def ominrange(x):
+                return x > min(omlim) and x < max(omlim)
+
         if thresholds is None:
-            thresholdsinrange = lambda x : True
+
+            def thresholdsinrange(x):
+                return True
+
         else:
-            thresholdsinrange = lambda x : x > min(thresholds) and \
-                x < max(thresholds)
+
+            def thresholdsinrange(x):
+                return x > min(thresholds) and x < max(thresholds)
+
         i = 0
         for image in self.images:
             # Check
             om = float(image.header["Omega"])
-            #print "%50.40f %s"%(om,image.header["Omega"]),
+            # print "%50.40f %s"%(om,image.header["Omega"]),
             # print om
             if numinrange(image.imagenumber) and ominrange(om):
-                # Read peaks
+                # Read peaks
                 i = image.linestart + 1
                 line = self.lines[i]
-                maxi = len(self.lines)-1
+                maxi = len(self.lines) - 1
                 npks = 0
                 while line.find("# File") < 0 and i < maxi:
-                    if line.find("Threshold")>0:
+                    if line.find("Threshold") > 0:
                         threshold = float(line.split()[-1])
-                    if line[0] != '#' and len(line) > 10 and \
-                        thresholdsinrange(threshold):
-                        p = peak(line, om, threshold,
-                                 image.imagenumber, self.tolerance)
+                    if (
+                        line[0] != "#"
+                        and len(line) > 10
+                        and thresholdsinrange(threshold)
+                    ):
+                        p = peak(line, om, threshold, image.imagenumber, self.tolerance)
                         peaks.append(p)
                         npks = npks + 1
                     i = i + 1
                     line = self.lines[i]
             else:
-                logging.info("Rejected peak " +
-                             str((numinrange(image.imagenumber), ominrange(om)) ))
-        logging.info("Time to read into lists %f"% (
-            time.time() - start_harvest) )
+                logging.info(
+                    "Rejected peak "
+                    + str((numinrange(image.imagenumber), ominrange(om)))
+                )
+        logging.info("Time to read into lists %f" % (time.time() - start_harvest))
         start_harvest = time.time()
         # DSU - decorate, sort, undecorate
-        sortable = [ (p.omega, p.xc, p.yc, p) for p in peaks ]
+        sortable = [(p.omega, p.xc, p.yc, p) for p in peaks]
         sortable.sort()
-        self.allpeaks = [ t[3] for t in sortable ]
-        logging.info("Time to DSU sort: %f %d"% (
-            time.time() - start_harvest, len(peaks)))
-#        # Sort by omega, then x, then y
-#        peaks.sort()
-#        print "Time to sort:",time.time()-start,len(peaks)
-#        self.allpeaks=peaks
+        self.allpeaks = [t[3] for t in sortable]
+        logging.info(
+            "Time to DSU sort: %f %d" % (time.time() - start_harvest, len(peaks))
+        )
+
+        # # Sort by omega, then x, then y
+        # peaks.sort()
+        # print "Time to sort:",time.time()-start,len(peaks)
+        # self.allpeaks=peaks
 
     def mergepeaks(self):
         """
@@ -395,29 +432,31 @@ def mergepeaks(self):
         More complex than initially planned
         """
-        if self.allpeaks==None:
+        if self.allpeaks is None:
             self.harvestpeaks()
-        if self.allpeaks==None:
-            raise 
Exception( - "There are no peaks to merge yet (read+harvest first)") + if self.allpeaks is None: + raise Exception("There are no peaks to merge yet (read+harvest first)") start_merge = time.time() npeaks = len(self.allpeaks) - logging.info("Merging len(self.allpeaks)=%d"% (len(self.allpeaks))) + logging.info("Merging len(self.allpeaks)=%d" % (len(self.allpeaks))) merge1 = [self.allpeaks[0]] i = 1 while i < npeaks: # merge peaks with same omega values - if self.allpeaks[i] == merge1[-1] and \ - abs(self.allpeaks[i].omega - merge1[-1].omega) < \ - merge1[-1].omegatol: + if ( + self.allpeaks[i] == merge1[-1] + and abs(self.allpeaks[i].omega - merge1[-1].omega) < merge1[-1].omegatol + ): merge1[-1] = merge1[-1].combine(self.allpeaks[i]) else: merge1.append(self.allpeaks[i]) i = i + 1 peaks = merge1 npeaks = len(peaks) - logging.info("After merging equivalent omegas npeaks = %d time = %f"% ( - npeaks, time.time()-start_merge)) + logging.info( + "After merging equivalent omegas npeaks = %d time = %f" + % (npeaks, time.time() - start_merge) + ) # Now merge the same peak on adjacent omega angles. # First make a list of unique omega angles i = 0 @@ -431,14 +470,16 @@ def mergepeaks(self): uniq[omega] = i else: pass - #ok - #raise Exception("Peaks apparently not sorted by omegas!!! " + # ok + # raise Exception("Peaks apparently not sorted by omegas!!! " # +str(i)+" "+str(peaks[i].omega)+" "+str(peaks[i-1].omega)) i = i + 1 # nomega = len(list(uniq.keys())) - logging.info("Number of different omegas = %d time = %f"% ( - nomega, time.time()-start_merge)) + logging.info( + "Number of different omegas = %d time = %f" + % (nomega, time.time() - start_merge) + ) # Now merge peaks with adjacent omega angles # Need to find peaks which match each other # Different threshold levels should already have been merged @@ -447,36 +488,38 @@ def mergepeaks(self): merged = [] keys = list(uniq.keys()) keys.sort() - #print keys + # print keys prevframe = [] while i < nomega - 2: first = uniq[keys[i]] - last = uniq[keys[i + 1]] + last = uniq[keys[i + 1]] if last < first: logging.critical("Keysorting problem!") raise Exception("Keysort problem") - #print first,last + # print first,last # # Active peaks are present on the current frame # These can merge with the next frame # active = peaks[first:last] - check = len(active)+len(prevframe) + check = len(active) + len(prevframe) ncarry = len(prevframe) active = active + prevframe prevframe = [] if len(active) != check: logging.critical("Problem here - peakmerge lost something") raise Exception("Problem here - lost something") - nextlast = uniq[keys[i + 2]] + nextlast = uniq[keys[i + 2]] nextframe = peaks[last:nextlast] om = keys[i] - debug = "Setting %-5d %8f with %-6d peaks on this and"% ( - i, om, last - first) - debug += " %-5d peaks on next, %-5d in buffer"% ( - nextlast - last, ncarry) + debug = "Setting %-5d %8f with %-6d peaks on this and" % ( + i, + om, + last - first, + ) + debug += " %-5d peaks on next, %-5d in buffer" % (nextlast - last, ncarry) logging.debug(debug) - #sys.stdout.flush() + # sys.stdout.flush() for peak1 in active: m = 0 if peak1.forgotten: @@ -487,7 +530,7 @@ def mergepeaks(self): if peak1 == peak2: newpeak = peak1.combine(peak2) m = 1 - break # Hope we would only overlap one peak + break # Hope we would only overlap one peak if m == 0: # This peak is no longer active, it did not overlap merged.append(peak1) @@ -500,12 +543,12 @@ def mergepeaks(self): # the last frame just needs to merge with anything in prevframe logging.debug("merging final 
frame") if nomega == 2: - active = peaks[keys[0]:keys[1]] + active = peaks[keys[0] : keys[1]] first = uniq[keys[1]] last = uniq[keys[2]] if nomega > 2: first = uniq[keys[i]] - last = uniq[keys[i + 1]] + last = uniq[keys[i + 1]] active = prevframe if nomega >= 2: for peak1 in active: @@ -529,9 +572,8 @@ def mergepeaks(self): merged.sort() self.merged = merged logging.info("Finished merging peaks") - logging.info("You have a total of " + str(len(self.merged)) + - " after merging") - logging.info("merging took took %f"% (time.time() - start_merge )) + logging.info("You have a total of " + str(len(self.merged)) + " after merging") + logging.info("merging took took %f" % (time.time() - start_merge)) return def filter(self): @@ -551,37 +593,31 @@ def filter(self): biglist = [] for p in self.merged: biglist.append( - [ p.xc, p.yc, p.omega, - p.np, p.avg, p.x, p.y, - p.sigx, p.sigy, p.covxy] - ) + [p.xc, p.yc, p.omega, p.np, p.avg, p.x, p.y, p.sigx, p.sigy, p.covxy] + ) # Need to filter based on x,y # also based on intensity # also based on shape - self.finalpeaks = np.array( np.transpose(biglist) ) + self.finalpeaks = np.array(np.transpose(biglist)) - def savepeaks(self,filename): + def savepeaks(self, filename): """ # Write out minimal information # list of xcorr,ycorr,omega, try to keep intensity now """ - f=open(filename,"w") - f.write( - "# xc yc omega npixels avg_intensity x_raw y_raw sigx sigy covxy\n") - p = self.finalpeaks + f = open(filename, "w") + f.write("# xc yc omega npixels avg_intensity x_raw y_raw sigx sigy covxy\n") + p = self.finalpeaks for i in range(p.shape[1]): for j in range(p.shape[0]): - f.write("%f "%(p[j,i])) + f.write("%f " % (p[j, i])) f.write("\n") f.close() - - - -if __name__=="__main__": - testfile=sys.argv[1] - fltfile=sys.argv[2] +if __name__ == "__main__": + testfile = sys.argv[1] + fltfile = sys.argv[2] obj = peakmerger() obj.readpeaks(testfile) @@ -590,12 +626,13 @@ def savepeaks(self,filename): obj.filter() obj.savepeaks(fltfile) sys.exit(0) - start=time.time() + start = time.time() import profile - profile.run('obj.readpeaks(testfile)','readpeaks.prof') -# object.readpeaks(testfile) - print("That took",time.time()-start,"/s") - profile.run('obj.harvestpeaks()','harvestpeaks.prof') - profile.run('obj.mergepeaks()','mergepeaks.prof') - profile.run('obj.filter()','filterpeaks.prof') - profile.run('obj.savepeaks(fltfile)','savepeaks.prof') + + profile.run("obj.readpeaks(testfile)", "readpeaks.prof") + # object.readpeaks(testfile) + print("That took", time.time() - start, "/s") + profile.run("obj.harvestpeaks()", "harvestpeaks.prof") + profile.run("obj.mergepeaks()", "mergepeaks.prof") + profile.run("obj.filter()", "filterpeaks.prof") + profile.run("obj.savepeaks(fltfile)", "savepeaks.prof") diff --git a/ImageD11/peaksearcher.py b/ImageD11/peaksearcher.py index 4235f8ef..ed8b2c28 100644 --- a/ImageD11/peaksearcher.py +++ b/ImageD11/peaksearcher.py @@ -1,9 +1,6 @@ - - from __future__ import print_function - # ImageD11_v1.0 Software for beamline ID11 # Copyright (C) 2005-2007 Jon Wright # @@ -22,7 +19,6 @@ # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 0211-1307 USA - """ Script for peaksearching images from the command line @@ -34,22 +30,23 @@ # For benchmarking import time + reallystart = time.time() from six.moves import queue + # import threading -from math import sqrt -import sys , glob , os.path +import sys, os.path import numpy # Generic file format opener from fabio -import fabio from fabio.openimage import openimage from ImageD11 import 
blobcorrector, ImageD11options from ImageD11.correct import correct from ImageD11.labelimage import labelimage from ImageD11 import ImageD11_thread + ImageD11_thread.stop_now = False @@ -58,23 +55,22 @@ def __init__(self): self.start = time.time() self.now = self.start self.msgs = [] - def msg(self,msg): + + def msg(self, msg): self.msgs.append(msg) - def tick(self,msg=""): + + def tick(self, msg=""): now = time.time() - self.msgs.append("%s %.2f/s"%(msg,now-self.now)) + self.msgs.append("%s %.2f/s" % (msg, now - self.now)) self.now = now - def tock(self,msg=""): + + def tock(self, msg=""): self.tick(msg) - print(" ".join(self.msgs),"%.2f/s"% (self.now-self.start)) + print(" ".join(self.msgs), "%.2f/s" % (self.now - self.start)) sys.stdout.flush() -def peaksearch( filename , - data_object , - corrector , - thresholds , - labims ): +def peaksearch(filename, data_object, corrector, thresholds, labims): """ filename : The name of the image file for progress info data_object : Fabio object containing data and header @@ -93,7 +89,7 @@ def peaksearch( filename , for lio in list(labims.values()): f = lio.sptfile f.write("\n\n# File %s\n" % (filename)) - f.write("# Frame %d\n" % (data_object.currentframe) ) + f.write("# Frame %d\n" % (data_object.currentframe)) f.write("# Processed on %s\n" % (time.asctime())) try: f.write("# Spatial correction from %s\n" % (corrector.splinefile)) @@ -102,11 +98,13 @@ def peaksearch( filename , except: pass for item in list(data_object.header.keys()): - if item == "headerstring": # skip + if item == "headerstring": # skip continue try: - f.write("# %s = %s\n" % (item, - str(data_object.header[item]).replace("\n"," "))) + f.write( + "# %s = %s\n" + % (item, str(data_object.header[item]).replace("\n", " ")) + ) except KeyError: pass @@ -120,19 +118,19 @@ def peaksearch( filename , labelim = labims[threshold] f = labelim.sptfile if labelim.shape != picture.shape: - raise "Incompatible blobimage buffer for file %s" %(filename) + raise "Incompatible blobimage buffer for file %s" % (filename) # # # Do the peaksearch - f.write("# Omega = %f\n"%(ome)) + f.write("# Omega = %f\n" % (ome)) labelim.peaksearch(picture, threshold, ome) - f.write("# Threshold = %f\n"%(threshold)) - f.write("# npks = %d\n"%(labelim.npk)) + f.write("# Threshold = %f\n" % (threshold)) + f.write("# npks = %d\n" % (labelim.npk)) # if labelim.npk > 0: labelim.output2dpeaks(f) labelim.mergelast() - t.msg("T=%-5d n=%-5d;" % (int(threshold),labelim.npk)) + t.msg("T=%-5d n=%-5d;" % (int(threshold), labelim.npk)) # Close the output file # Finish progress indicator for this file t.tock() @@ -146,34 +144,39 @@ def peaksearch_driver(options, args): """ ################## debugging still for a in args: - print("arg: "+str(a)+","+str(type(a))) - for o in list(options.__dict__.keys()): # FIXME - if getattr(options,o) in [ "False", "FALSE", "false" ]: - setattr(options,o,False) - if getattr(options,o) in [ "True", "TRUE", "true" ]: - setattr(options,o,True) - print("option:",str(o),str(getattr(options,o)),",",\ - str(type( getattr(options,o) ) )) + print("arg: " + str(a) + "," + str(type(a))) + for o in list(options.__dict__.keys()): # FIXME + if getattr(options, o) in ["False", "FALSE", "false"]: + setattr(options, o, False) + if getattr(options, o) in ["True", "TRUE", "true"]: + setattr(options, o, True) + print( + "option:", + str(o), + str(getattr(options, o)), + ",", + str(type(getattr(options, o))), + ) ################### - print("This peaksearcher is from",__file__) + print("This peaksearcher is from", 
__file__) if options.killfile is not None and os.path.exists(options.killfile): print("The purpose of the killfile option is to create that file") print("only when you want peaksearcher to stop") print("If the file already exists when you run peaksearcher it is") print("never going to get started") - raise ValueError("Your killfile "+options.killfile+" already exists") + raise ValueError("Your killfile " + options.killfile + " already exists") if options.thresholds is None: raise ValueError("No thresholds supplied [-t 1234]") - if len(args) == 0 and options.stem is None: + if len(args) == 0 and options.stem is None: raise ValueError("No files to process") # What to do about spatial - if options.perfect=="N" and os.path.exists(options.spline): - print("Using spatial from",options.spline) + if options.perfect == "N" and os.path.exists(options.spline): + print("Using spatial from", options.spline) corrfunc = blobcorrector.correctorclass(options.spline) else: print("Avoiding spatial correction") @@ -182,20 +185,21 @@ def peaksearch_driver(options, args): # This is always the case now corrfunc.orientation = "edf" scan = None - if options.format in ['bruker', 'BRUKER', 'Bruker']: + if options.format in ["bruker", "BRUKER", "Bruker"]: extn = "" if options.perfect != "N": print("WARNING: Your spline file is ImageD11 specific") print("... from a fabio converted to edf first") - elif options.format == 'GE': + elif options.format == "GE": extn = "" # KE: This seems to be a mistake and keeps PeakSearch from working in # some cases. Should be revisited if commenting it out causes problems. # options.ndigits = 0 - elif options.format == 'py': + elif options.format == "py": import importlib - sys.path.append( '.' ) - scan = importlib.import_module( options.stem ) + + sys.path.append(".") + scan = importlib.import_module(options.stem) first_image = scan.first_image file_series_object = scan.file_series_object else: @@ -203,51 +207,55 @@ def peaksearch_driver(options, args): if scan is None: if options.interlaced: - f0 = ["%s0_%04d%s"%(options.stem,i,options.format) for i in range( - options.first, - options.last+1)] - f1 = ["%s1_%04d%s"%(options.stem,i,options.format) for i in range( - options.first, - options.last+1)] + f0 = [ + "%s0_%04d%s" % (options.stem, i, options.format) + for i in range(options.first, options.last + 1) + ] + f1 = [ + "%s1_%04d%s" % (options.stem, i, options.format) + for i in range(options.first, options.last + 1) + ] if options.iflip: f1 = [a for a in f1[::-1]] - def fso(f0,f1): - for a,b in zip(f0,f1): + def fso(f0, f1): + for a, b in zip(f0, f1): try: yield fabio.open(a) yield fabio.open(b) except: - print(a,b) + print(a, b) raise - file_series_object = fso(f0,f1) - first_image = openimage( f0[0] ) + + file_series_object = fso(f0, f1) + first_image = openimage(f0[0]) else: import fabio + if options.ndigits > 0: file_name_object = fabio.filename_object( options.stem, - num = options.first, - extension = extn, - digits = options.ndigits) + num=options.first, + extension=extn, + digits=options.ndigits, + ) else: file_name_object = options.stem - - first_image = openimage( file_name_object ) + first_image = openimage(file_name_object) import fabio.file_series + # Use traceback = True for debugging file_series_object = fabio.file_series.new_file_series( - first_image, - nimages = options.last - options.first + 1, - traceback = True ) + first_image, nimages=options.last - options.first + 1, traceback=True + ) # Output files: if options.outfile[-4:] != ".spt": options.outfile = 
options.outfile + ".spt"
-        print("Your output file must end with .spt, changing to ",options.outfile)
+        print("Your output file must end with .spt, changing to ", options.outfile)
 
     # Omega overrides
 
@@ -257,54 +265,51 @@ def fso(f0,f1):
     OMEGAOVERRIDE = options.OMEGAOVERRIDE
     # Make a blobimage the same size as the first image to process
 
-    # List comprehension - convert remaining args to floats
     # must be unique list so go via a set
-    thresholds_list = list( set( [float(t) for t in options.thresholds] ) )
+    thresholds_list = list(set([float(t) for t in options.thresholds]))
     thresholds_list.sort()
 
-    li_objs={} # label image objects, dict of
+    li_objs = {}  # label image objects, dict of
 
-
-    s = first_image.data.shape # data array shape
+    s = first_image.data.shape  # data array shape
     # Create label images
     for t in thresholds_list:
         # the last 4 chars are guaranteed to be .spt above
-        mergefile="%s_t%d.flt"%(options.outfile[:-4], t)
-        spotfile = "%s_t%d.spt"%(options.outfile[:-4], t)
-        li_objs[t]=labelimage(shape = s,
-                              fileout = mergefile,
-                              spatial = corrfunc,
-                              sptfile=spotfile)
-        print("make labelimage",mergefile,spotfile)
+        mergefile = "%s_t%d.flt" % (options.outfile[:-4], t)
+        spotfile = "%s_t%d.spt" % (options.outfile[:-4], t)
+        li_objs[t] = labelimage(
+            shape=s, fileout=mergefile, spatial=corrfunc, sptfile=spotfile
+        )
+        print("make labelimage", mergefile, spotfile)
     # Not sure why that was there (I think if glob was used)
     # files.sort()
     if options.dark is not None:
-        print("Using dark (background)",options.dark)
-        darkimage= openimage(options.dark).data.astype(numpy.float32)
+        print("Using dark (background)", options.dark)
+        darkimage = openimage(options.dark).data.astype(numpy.float32)
     else:
-        darkimage=None
-    if options.darkoffset!=0:
-        print("Adding darkoffset",options.darkoffset)
+        darkimage = None
+    if options.darkoffset != 0:
+        print("Adding darkoffset", options.darkoffset)
         if darkimage is None:
             darkimage = options.darkoffset
         else:
             darkimage += options.darkoffset
     if options.flood is not None:
-        floodimage=openimage(options.flood).data
-        cen0 = int(floodimage.shape[0]/6)
-        cen1 = int(floodimage.shape[0]/6)
+        floodimage = openimage(options.flood).data
+        cen0 = int(floodimage.shape[0] / 6)
+        cen1 = int(floodimage.shape[1] / 6)
         middle = floodimage[cen0:-cen0, cen1:-cen1]
-        nmid = middle.shape[0]*middle.shape[1]
+        # nmid = middle.shape[0] * middle.shape[1]
         floodavg = numpy.mean(middle)
-        print("Using flood",options.flood,"average value",floodavg)
+        print("Using flood", options.flood, "average value", floodavg)
         if floodavg < 0.7 or floodavg > 1.3:
             print("Your flood image does not seem to be normalised!!!")
 
     else:
-        floodimage=None
-    start = time.time()
+        floodimage = None
+    # start = time.time()
     print("File being treated in -> out, elapsed time")
     # If we want to do read-ahead threading we fill up a Queue object with data
    # objects
@@ -313,89 +318,128 @@ def fso(f0,f1):
    # for now only one
    if options.oneThread:
        # Wrap in a function to allow profiling (perhaps? what about globals??)
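# A minimal sketch of the dark/flood step configured above, assuming the
# usual convention corrected = (raw - dark) / flood; correct_sketch below is
# illustrative only, not the ImageD11.correct implementation that the loops
# here actually call. It shows why a flood field whose central average is
# far from 1.0 would rescale intensities and earns the warning printed above.
import numpy as np

def correct_sketch(raw, dark=None, flood=None):
    data = raw.astype(np.float32)
    if dark is not None:
        data = data - dark  # remove dark current / constant detector offset
    if flood is not None:
        data = data / flood  # flatten the pixel-to-pixel response
    return data

# usage (hypothetical): corrected = correct_sketch(frame, darkimage, floodimage)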
- def go_for_it(file_series_object, darkimage, floodimage, - corrfunc , thresholds_list , li_objs, - OMEGA, OMEGASTEP, OMEGAOVERRIDE ): + def go_for_it( + file_series_object, + darkimage, + floodimage, + corrfunc, + thresholds_list, + li_objs, + OMEGA, + OMEGASTEP, + OMEGAOVERRIDE, + ): for data_object in file_series_object: t = timer() - if not hasattr( data_object, "data"): + if not hasattr(data_object, "data"): # Is usually an IOError - if isinstance( data_object[1], IOError): - sys.stdout.write(data_object[1].strerror + '\n') - # data_object[1].filename + if isinstance(data_object[1], IOError): + sys.stdout.write(data_object[1].strerror + "\n") + # data_object[1].filename else: import traceback - traceback.print_exception(data_object[0],data_object[1],data_object[2]) + + traceback.print_exception( + data_object[0], data_object[1], data_object[2] + ) sys.exit() continue filein = data_object.filename if OMEGAOVERRIDE or "Omega" not in data_object.header: data_object.header["Omega"] = OMEGA OMEGA += OMEGASTEP - OMEGAOVERRIDE = True # once you do it once, continue + OMEGAOVERRIDE = True # once you do it once, continue if not OMEGAOVERRIDE and options.omegamotor != "Omega": - data_object.header["Omega"] = float( data_object.header[options.omegamotor] ) - data_object = correct( data_object, darkimage, floodimage, - do_median = options.median, - monitorval = options.monitorval, - monitorcol = options.monitorcol, - ) - t.tick(filein+" io/cor") - peaksearch( filein, data_object , corrfunc , - thresholds_list , li_objs ) + data_object.header["Omega"] = float( + data_object.header[options.omegamotor] + ) + data_object = correct( + data_object, + darkimage, + floodimage, + do_median=options.median, + monitorval=options.monitorval, + monitorcol=options.monitorcol, + ) + t.tick(filein + " io/cor") + peaksearch(filein, data_object, corrfunc, thresholds_list, li_objs) for t in thresholds_list: li_objs[t].finalise() - go_for_it(file_series_object, darkimage, floodimage, - corrfunc , thresholds_list, li_objs, - OMEGA, OMEGASTEP, OMEGAOVERRIDE ) + go_for_it( + file_series_object, + darkimage, + floodimage, + corrfunc, + thresholds_list, + li_objs, + OMEGA, + OMEGASTEP, + OMEGAOVERRIDE, + ) else: print("Going to use threaded version!") try: # TODO move this to a module ? 
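# A minimal, self-contained sketch of the threaded pipeline assembled below,
# with illustrative names (reader/corrector/searcher are stand-ins, not the
# ImageD11 classes): one reader feeds a single corrector through a bounded
# Queue, and the corrector fans each frame out to one queue per threshold, so
# every image is read and corrected exactly once however many thresholds are
# searched. A None sentinel shuts each stage down in order.
import queue
import threading

def reader(frames, out_q):
    for f in frames:
        out_q.put(f)
    out_q.put(None)  # sentinel marks the end of the series

def corrector(in_q, out_qs):
    while True:
        f = in_q.get()
        if f is None:
            for q in out_qs:
                q.put(None)  # pass the sentinel on to every searcher
            return
        for q in out_qs:  # fan out; downstream treats frames as read-only
            q.put(f + 1)  # stand-in for the real dark/flood correction

def searcher(in_q, seen):
    while True:
        f = in_q.get()
        if f is None:
            return
        seen.append(f)

read_q = queue.Queue(5)  # bounded, like the 5-deep read queue used below
out_qs = [queue.Queue(3), queue.Queue(3)]
results = [[], []]
threads = [threading.Thread(target=reader, args=(range(4), read_q)),
           threading.Thread(target=corrector, args=(read_q, out_qs))]
threads += [threading.Thread(target=searcher, args=(q, r))
            for q, r in zip(out_qs, results)]
for t in threads:
    t.start()
for t in threads:
    t.join()
assert results[0] == results[1]  # both searchers saw every corrected frame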
- class read_only(ImageD11_thread.ImageD11_thread): - def __init__(self, queue, file_series_obj , myname="read_only", - OMEGA=0, OMEGAOVERRIDE = False, OMEGASTEP = 1): - """ Reads files in file_series_obj, writes to queue """ + def __init__( + self, + queue, + file_series_obj, + myname="read_only", + OMEGA=0, + OMEGAOVERRIDE=False, + OMEGASTEP=1, + ): + """Reads files in file_series_obj, writes to queue""" self.queue = queue self.file_series_obj = file_series_obj self.OMEGA = OMEGA self.OMEGAOVERRIDE = OMEGAOVERRIDE self.OMEGASTEP = OMEGASTEP - ImageD11_thread.ImageD11_thread.__init__(self , - myname=myname) - print("Reading thread initialised", end=' ') - + ImageD11_thread.ImageD11_thread.__init__(self, myname=myname) + print("Reading thread initialised", end=" ") def ImageD11_run(self): - """ Read images and copy them to self.queue """ + """Read images and copy them to self.queue""" for data_object in self.file_series_obj: if self.ImageD11_stop_now(): print("Reader thread stopping") break - if not hasattr( data_object, "data" ): - import pdb; pdb.set_trace() + if not hasattr(data_object, "data"): + import pdb + + pdb.set_trace() # Is usually an IOError - if isinstance( data_object[1], IOError): - sys.stdout.write(str(data_object[1].strerror) + '\n') + if isinstance(data_object[1], IOError): + sys.stdout.write(str(data_object[1].strerror) + "\n") else: import traceback - traceback.print_exception(data_object[0],data_object[1],data_object[2]) + + traceback.print_exception( + data_object[0], data_object[1], data_object[2] + ) sys.exit() continue ti = timer() - filein = data_object.filename + "[%d]"%( data_object.currentframe ) + filein = data_object.filename + "[%d]" % ( + data_object.currentframe + ) try: if self.OMEGAOVERRIDE: # print "Over ride due to option",self.OMEGAOVERRIDE data_object.header["Omega"] = self.OMEGA self.OMEGA += self.OMEGASTEP else: - if options.omegamotor != 'Omega' and options.omegamotor in data_object.header: - data_object.header["Omega"] = float(data_object.header[options.omegamotor]) + if ( + options.omegamotor != "Omega" + and options.omegamotor in data_object.header + ): + data_object.header["Omega"] = float( + data_object.header[options.omegamotor] + ) if "Omega" not in data_object.header: # print "Computing omega as not in header" data_object.header["Omega"] = self.OMEGA @@ -407,22 +451,30 @@ def ImageD11_run(self): except: continue ti.tick(filein) - self.queue.put((filein, data_object) , block = True) + self.queue.put((filein, data_object), block=True) ti.tock(" enqueue ") if self.ImageD11_stop_now(): print("Reader thread stopping") break # Flag the end of the series - self.queue.put( (None, None) , block = True) + self.queue.put((None, None), block=True) class correct_one_to_many(ImageD11_thread.ImageD11_thread): - def __init__(self, queue_read, queues_out, thresholds_list, - dark = None , flood = None, myname="correct_one", - monitorcol = None, monitorval = None, - do_median = False): - """ Using a single reading queue retains a global ordering + def __init__( + self, + queue_read, + queues_out, + thresholds_list, + dark=None, + flood=None, + myname="correct_one", + monitorcol=None, + monitorval=None, + do_median=False, + ): + """Using a single reading queue retains a global ordering corrects and copies images to output queues doing - correction once """ + correction once""" self.queue_read = queue_read self.queues_out = queues_out self.dark = dark @@ -431,87 +483,93 @@ def __init__(self, queue_read, queues_out, thresholds_list, self.monitorcol = 
monitorcol self.monitorval = monitorval self.thresholds_list = thresholds_list - ImageD11_thread.ImageD11_thread.__init__(self , - myname=myname) + ImageD11_thread.ImageD11_thread.__init__(self, myname=myname) def ImageD11_run(self): while not self.ImageD11_stop_now(): ti = timer() - filein, data_object = self.queue_read.get(block = True) + filein, data_object = self.queue_read.get(block=True) if filein is None: for t in self.thresholds_list: - self.queues_out[t].put( (None, None) , - block = True) + self.queues_out[t].put((None, None), block=True) # exit the while 1 break - data_object = correct(data_object, self.dark, - self.flood, - do_median = self.do_median, - monitorval = self.monitorval, - monitorcol = self.monitorcol, - ) - ti.tick(filein+" correct ") + data_object = correct( + data_object, + self.dark, + self.flood, + do_median=self.do_median, + monitorval=self.monitorval, + monitorcol=self.monitorcol, + ) + ti.tick(filein + " correct ") for t in self.thresholds_list: # Hope that data object is read only - self.queues_out[t].put((filein, data_object) , - block = True) + self.queues_out[t].put((filein, data_object), block=True) ti.tock(" enqueue ") print("Corrector thread stopping") - - class peaksearch_one(ImageD11_thread.ImageD11_thread): - def __init__(self, q, corrfunc, threshold, li_obj, - myname="peaksearch_one" ): - """ This will handle a single threshold and labelimage - object """ + def __init__( + self, q, corrfunc, threshold, li_obj, myname="peaksearch_one" + ): + """This will handle a single threshold and labelimage + object""" self.q = q self.corrfunc = corrfunc self.threshold = threshold self.li_obj = li_obj ImageD11_thread.ImageD11_thread.__init__( - self, - myname=myname+"_"+str(threshold)) - + self, myname=myname + "_" + str(threshold) + ) def run(self): while not self.ImageD11_stop_now(): - filein, data_object = self.q.get(block = True) - if not hasattr( data_object, "data" ): + filein, data_object = self.q.get(block=True) + if not hasattr(data_object, "data"): break - peaksearch( filein, data_object , self.corrfunc , - [self.threshold] , - { self.threshold : self.li_obj } ) + peaksearch( + filein, + data_object, + self.corrfunc, + [self.threshold], + {self.threshold: self.li_obj}, + ) self.li_obj.finalise() # 8 MB images - max 40 MB in this queue read_queue = queue.Queue(5) - reader = read_only(read_queue, file_series_object, - OMEGA = OMEGA, - OMEGASTEP = OMEGASTEP, - OMEGAOVERRIDE = OMEGAOVERRIDE ) + reader = read_only( + read_queue, + file_series_object, + OMEGA=OMEGA, + OMEGASTEP=OMEGASTEP, + OMEGAOVERRIDE=OMEGAOVERRIDE, + ) reader.start() queues = {} searchers = {} for t in thresholds_list: - print("make queue and peaksearch for threshold",t) + print("make queue and peaksearch for threshold", t) queues[t] = queue.Queue(3) - searchers[t] = peaksearch_one(queues[t], corrfunc, - t, li_objs[t] ) - corrector = correct_one_to_many( read_queue, - queues, - thresholds_list, - dark=darkimage, - flood=floodimage, - do_median = options.median, - monitorcol = options.monitorcol, - monitorval = options.monitorval) + searchers[t] = peaksearch_one(queues[t], corrfunc, t, li_objs[t]) + corrector = correct_one_to_many( + read_queue, + queues, + thresholds_list, + dark=darkimage, + flood=floodimage, + do_median=options.median, + monitorcol=options.monitorcol, + monitorval=options.monitorval, + ) corrector.start() my_threads = [reader, corrector] for t in thresholds_list[::-1]: searchers[t].start() my_threads.append(searchers[t]) nalive = len(my_threads) + def empty_queue(q): 
while 1: try: @@ -519,14 +577,16 @@ def empty_queue(q): except: break q.put((None, None), block=False) + while nalive > 0: try: nalive = 0 for thr in my_threads: if thr.is_alive(): nalive += 1 - if options.killfile is not None and \ - os.path.exists(options.killfile): + if options.killfile is not None and os.path.exists( + options.killfile + ): raise KeyboardInterrupt() time.sleep(1) except KeyboardInterrupt: @@ -557,127 +617,257 @@ def empty_queue(q): thr.join(timeout=1) raise - except ImportError: print("Probably no threading module present") raise - def get_options(parser): - """ Add our options to a parser object """ - parser.add_argument("-n", "--namestem", action="store", - dest="stem", type=str, default="data", - help="Name of the files up the digits part "+\ - "eg mydata in mydata0000.edf" ) - parser.add_argument("-F", "--format", action="store", - dest="format",default=".edf", type=str, - help="Image File format, eg edf or bruker or GE or py" ) - parser.add_argument("-f", "--first", action="store", - dest="first", default=0, type=int, - help="Number of first file to process, default=0") - parser.add_argument("-l", "--last", action="store", - dest="last", type=int,default =0, - help="Number of last file to process") - parser.add_argument("-o", "--outfile", action="store", - dest="outfile",default="peaks.spt", type=str, - help="Output filename, default=peaks.spt") - parser.add_argument("-d", "--darkfile", action="store", - dest="dark", default=None, type=ImageD11options.ImageFileType(mode='r'), - help="Dark current filename, to be subtracted, default=None") - dod = 0 - parser.add_argument("-D", "--darkfileoffset", action="store", - dest="darkoffset", default=dod, type=float, - help= - "Constant to subtract from dark to avoid overflows, default=%d"%(dod)) - # s="/data/opid11/inhouse/Frelon2K/spatial2k.spline" - parser.add_argument("-s", "--splinefile", action="store", - dest="spline", default=None, type=ImageD11options.SplineFileType(mode='r'), - help="Spline file for spatial distortion, default=None" ) - parser.add_argument("-p", "--perfect_images", action="store", - choices=["Y","N"], default="N", dest="perfect", - help="Ignore spline Y|N, default=N") - parser.add_argument("-O", "--flood", action="store", - type=ImageD11options.ImageFileType(mode='r'), - default=None, dest="flood", - help="Flood file, default=None") - parser.add_argument("-t", "--threshold", action="append", type=float, - dest="thresholds", default=None, - help="Threshold level, you can have several") - parser.add_argument("--OmegaFromHeader", action="store_false", - dest="OMEGAOVERRIDE", default=False, - help="Read Omega values from headers [default]") - parser.add_argument("--OmegaOverRide", action="store_true", - dest="OMEGAOVERRIDE", default=False, - help="Override Omega values from headers") - parser.add_argument("--singleThread", action="store_true", - dest="oneThread", default=False, - help="Do single threaded processing") - # if you want to do this then instead I think you want - # python -m cProfile -o xx.prof peaksearch.py ... - # python -m pstats xx.prof - # ... sort - # ... 
stats
-# parser.add_argument("--profile", action="store",
-#                    type=ImageD11options.ProfilingFileType,
-#                    dest="profile_file", default=None,
-#                    help="Write profiling information (you will want singleThread too)")
-    parser.add_argument("-S","--step", action="store",
-                        dest="OMEGASTEP", default=1.0, type=float,
-                        help="Step size in Omega when you have no header info")
-    parser.add_argument("-T","--start", action="store",
-                        dest="OMEGA", default=0.0, type=float,
-                        help="Start position in Omega when you have no header info")
-    parser.add_argument("-k","--killfile", action="store",
-                        dest="killfile", default=None,
-                        type=ImageD11options.FileType(),
-                        help="Name of file to create stop the peaksearcher running")
-    parser.add_argument("--ndigits", action="store", type=int,
-                        dest = "ndigits", default = 4,
-                        help = "Number of digits in file numbering [4]")
-    parser.add_argument("-P", "--padding", action="store",
-                        choices=["Y","N"], default="Y", dest="padding",
-                        help="Is the image number to padded Y|N, e.g. "\
-                            "should 1 be 0001 or just 1 in image name, default=Y")
-    parser.add_argument("-m", "--median1D", action="store_true",
-                        default=False, dest="median",
-                        help="Computes the 1D median, writes it to file .bkm and" \
-                            +" subtracts it from image. For liquid background"\
-                            +" on radially transformed images")
-    parser.add_argument("--monitorcol", action="store", type=str,
-                        dest="monitorcol",
-                        default = None,
-                        help="Header value for incident beam intensity")
-    parser.add_argument("--monitorval", action="store", type=float,
-                        dest="monitorval",
-                        default = None,
-                        help="Incident beam intensity value to normalise to")
-    parser.add_argument("--omega_motor", action="store", type=str,
-                        dest = "omegamotor", default = "Omega",
-                        help = "Header value to use for rotation motor position [Omega]")
-    parser.add_argument("--omega_motor_step", action="store", type=str,
-                        dest = "omegamotorstep", default = "OmegaStep",
-                        help = "Header value to use for rotation width [OmegaStep]")
-    parser.add_argument("--interlaced", action="store_true",
-                        dest = "interlaced", default = False,
-                        help = "Interlaced DCT scan")
-    parser.add_argument("--iflip", action="store_true",
-                        dest="iflip", default=False,
-                        help = "Reverse second half of interlaced scan")
-    return parser
-
-
-
-
-
-def get_help(usage = True):
-    """ return the help string for online help """
+    """Add our options to a parser object"""
+    parser.add_argument(
+        "-n",
+        "--namestem",
+        action="store",
+        dest="stem",
+        type=str,
+        default="data",
+        help="Name of the files up to the digits part, eg mydata in mydata0000.edf",
+    )
+    parser.add_argument(
+        "-F",
+        "--format",
+        action="store",
+        dest="format",
+        default=".edf",
+        type=str,
+        help="Image File format, eg edf or bruker or GE or py",
+    )
+    parser.add_argument(
+        "-f",
+        "--first",
+        action="store",
+        dest="first",
+        default=0,
+        type=int,
+        help="Number of first file to process, default=0",
+    )
+    parser.add_argument(
+        "-l",
+        "--last",
+        action="store",
+        dest="last",
+        type=int,
+        default=0,
+        help="Number of last file to process",
+    )
+    parser.add_argument(
+        "-o",
+        "--outfile",
+        action="store",
+        dest="outfile",
+        default="peaks.spt",
+        type=str,
+        help="Output filename, default=peaks.spt",
+    )
+    parser.add_argument(
+        "-d",
+        "--darkfile",
+        action="store",
+        dest="dark",
+        default=None,
+        type=ImageD11options.ImageFileType(mode="r"),
+        help="Dark current filename, to be subtracted, default=None",
+    )
+    dod = 0
+    parser.add_argument(
+        "-D",
+        "--darkfileoffset",
+        action="store",
+        dest="darkoffset",
+        default=dod,
+        type=float,
+        help="Constant to subtract from dark to avoid overflows, default=%d" % (dod),
+    )
+    # s="/data/opid11/inhouse/Frelon2K/spatial2k.spline"
+    parser.add_argument(
+        "-s",
+        "--splinefile",
+        action="store",
+        dest="spline",
+        default=None,
+        type=ImageD11options.SplineFileType(mode="r"),
+        help="Spline file for spatial distortion, default=None",
+    )
+    parser.add_argument(
+        "-p",
+        "--perfect_images",
+        action="store",
+        choices=["Y", "N"],
+        default="N",
+        dest="perfect",
+        help="Ignore spline Y|N, default=N",
+    )
+    parser.add_argument(
+        "-O",
+        "--flood",
+        action="store",
+        type=ImageD11options.ImageFileType(mode="r"),
+        default=None,
+        dest="flood",
+        help="Flood file, default=None",
+    )
+    parser.add_argument(
+        "-t",
+        "--threshold",
+        action="append",
+        type=float,
+        dest="thresholds",
+        default=None,
+        help="Threshold level, you can have several",
+    )
+    parser.add_argument(
+        "--OmegaFromHeader",
+        action="store_false",
+        dest="OMEGAOVERRIDE",
+        default=False,
+        help="Read Omega values from headers [default]",
+    )
+    parser.add_argument(
+        "--OmegaOverRide",
+        action="store_true",
+        dest="OMEGAOVERRIDE",
+        default=False,
+        help="Override Omega values from headers",
+    )
+    parser.add_argument(
+        "--singleThread",
+        action="store_true",
+        dest="oneThread",
+        default=False,
+        help="Do single threaded processing",
+    )
+    # if you want to do this then instead I think you want
+    #    python -m cProfile -o xx.prof peaksearch.py ...
+    #    python -m pstats xx.prof
+    #        ... sort
+    #        ... stats
+    # parser.add_argument("--profile", action="store",
+    #                    type=ImageD11options.ProfilingFileType,
+    #                    dest="profile_file", default=None,
+    #                    help="Write profiling information (you will want singleThread too)")
+    parser.add_argument(
+        "-S",
+        "--step",
+        action="store",
+        dest="OMEGASTEP",
+        default=1.0,
+        type=float,
+        help="Step size in Omega when you have no header info",
+    )
+    parser.add_argument(
+        "-T",
+        "--start",
+        action="store",
+        dest="OMEGA",
+        default=0.0,
+        type=float,
+        help="Start position in Omega when you have no header info",
+    )
+    parser.add_argument(
+        "-k",
+        "--killfile",
+        action="store",
+        dest="killfile",
+        default=None,
+        type=ImageD11options.FileType(),
+        help="Name of file to create to stop the peaksearcher running",
+    )
+    parser.add_argument(
+        "--ndigits",
+        action="store",
+        type=int,
+        dest="ndigits",
+        default=4,
+        help="Number of digits in file numbering [4]",
+    )
+    parser.add_argument(
+        "-P",
+        "--padding",
+        action="store",
+        choices=["Y", "N"],
+        default="Y",
+        dest="padding",
+        help="Is the image number to be padded Y|N, e.g. "
+        "should 1 be 0001 or just 1 in image name, default=Y",
+    )
+    parser.add_argument(
+        "-m",
+        "--median1D",
+        action="store_true",
+        default=False,
+        dest="median",
+        help="Computes the 1D median, writes it to file .bkm and"
+        + " subtracts it from image. 
For liquid background" + + " on radially transformed images", + ) + parser.add_argument( + "--monitorcol", + action="store", + type=str, + dest="monitorcol", + default=None, + help="Header value for incident beam intensity", + ) + parser.add_argument( + "--monitorval", + action="store", + type=float, + dest="monitorval", + default=None, + help="Incident beam intensity value to normalise to", + ) + parser.add_argument( + "--omega_motor", + action="store", + type=str, + dest="omegamotor", + default="Omega", + help="Header value to use for rotation motor position [Omega]", + ) + parser.add_argument( + "--omega_motor_step", + action="store", + type=str, + dest="omegamotorstep", + default="OmegaStep", + help="Header value to use for rotation width [OmegaStep]", + ) + parser.add_argument( + "--interlaced", + action="store_true", + dest="interlaced", + default=False, + help="Interlaced DCT scan", + ) + parser.add_argument( + "--iflip", + action="store_true", + dest="iflip", + default=False, + help="Reverse second half of interlaced scan", + ) + return parser + + +def get_help(usage=True): + """return the help string for online help""" try: import StringIO as io except: # python3 import io import argparse + if usage: o = get_options(argparse.ArgumentParser()) else: @@ -687,7 +877,5 @@ def get_help(usage = True): return f.getvalue() -if __name__=="__main__": +if __name__ == "__main__": raise Exception("Please use the driver script peaksearch.py") - - diff --git a/ImageD11/project/h5demo1.py b/ImageD11/project/h5demo1.py index 1cc7d7c1..7d4133d2 100644 --- a/ImageD11/project/h5demo1.py +++ b/ImageD11/project/h5demo1.py @@ -1,88 +1,95 @@ - from __future__ import print_function # Make a project file in a hdf. -# example : +# example : # from __future__ import division -import os import numpy as np import h5py import fabio # how the scan was done fmt = "/data/id11/nanoscope/Commissioning/sonja_fib_Al_z500__nobackup/difftomo_Al_y%03d_/interlaced_1_%d/Frelon/interlaced_1_%d_Frelon%04d.edf" -ysteps = np.arange( -60, 60.1, 1) +ysteps = np.arange(-60, 60.1, 1) npts = 360 * 2 ostep = 0.5 ostart = 0.25 # We recorded a series of scans -omegas = [ np.arange( npts, dtype = np.float32 ) * ostep + ostart, ] * len( ysteps ) -dtys = [ np.full( npts, ystep, dtype = np.float32 ) for ystep in ysteps ] +omegas = [ + np.arange(npts, dtype=np.float32) * ostep + ostart, +] * len(ysteps) +dtys = [np.full(npts, ystep, dtype=np.float32) for ystep in ysteps] + -def interlacedscan( ystep, fmt, npts ): +def interlacedscan(ystep, fmt, npts): scan = [] - for i in range( npts//2 ): - scan.append( fmt % ( ystep, 1, 1, i ) ) # forward interlaced - scan.append( fmt % ( ystep, 2, 2, npts//2 - 1 - i) ) + for i in range(npts // 2): + scan.append(fmt % (ystep, 1, 1, i)) # forward interlaced + scan.append(fmt % (ystep, 2, 2, npts // 2 - 1 - i)) return scan -filenames = [ interlacedscan( ystep, fmt, npts ) for ystep in ysteps ] + +filenames = [interlacedscan(ystep, fmt, npts) for ystep in ysteps] # Information about the edf files -def binary_info( filename ): - """ info to read the edf files - assume flat regular files """ - im = fabio.open( filename ) +def binary_info(filename): + """info to read the edf files - assume flat regular files""" + im = fabio.open(filename) return im._frames[0].start, im._frames[0].size, im.data.shape, im.data.dtype + def writesino(h5name, omegas, dtys, filenames): - offset, size, shape, dtype = binary_info( filenames[0][0] ) - print(offset,size,shape,dtype) - nframes = len( omegas[0] ) * len( omegas ) + offset, 
size, shape, dtype = binary_info(filenames[0][0]) + print(offset, size, shape, dtype) + nframes = len(omegas[0]) * len(omegas) print(nframes, len(omegas), sum(len(o) for o in omegas)) # Now create a hdf5 file: - with h5py.File(h5name, "w", libver='latest' ) as h: + with h5py.File(h5name, "w", libver="latest") as h: # now create a VDS linking within the same file - layout = h5py.VirtualLayout( shape = (nframes, shape[0], shape[1] ), - dtype = dtype ) + layout = h5py.VirtualLayout(shape=(nframes, shape[0], shape[1]), dtype=dtype) j = 0 - graw = h.require_group('scans') + graw = h.require_group("scans") for i, scan in enumerate(filenames): - g = graw.require_group('scan%04d'%(i)) - g.create_dataset( "data", - shape = (len(scan), shape[0], shape[1]), - dtype = dtype, - external = [(fname, offset, size) for fname in scan] ) - g.create_dataset( "omega" , data = omegas[i] ) - g.create_dataset( "dty" , data = dtys[i] ) - vsource = h5py.VirtualSource( h.filename, # ok - circular? - 'scans/scan%04d/data'%(i), - shape = (len(scan), shape[0], shape[1]) ) - layout[ j:j+len(scan), :, :] = vsource + g = graw.require_group("scan%04d" % (i)) + g.create_dataset( + "data", + shape=(len(scan), shape[0], shape[1]), + dtype=dtype, + external=[(fname, offset, size) for fname in scan], + ) + g.create_dataset("omega", data=omegas[i]) + g.create_dataset("dty", data=dtys[i]) + vsource = h5py.VirtualSource( + h.filename, # ok - circular? + "scans/scan%04d/data" % (i), + shape=(len(scan), shape[0], shape[1]), + ) + layout[j : j + len(scan), :, :] = vsource j += len(scan) - g = h.require_group('sinogram') - g.create_dataset('omega', data = np.concatenate(omegas) ) - g.create_dataset('dty', data = np.concatenate(dtys) ) - g.create_virtual_dataset( 'data', layout ) - -h5name = 'demo1.h5' -writesino( h5name, omegas, dtys, filenames ) + g = h.require_group("sinogram") + g.create_dataset("omega", data=np.concatenate(omegas)) + g.create_dataset("dty", data=np.concatenate(dtys)) + g.create_virtual_dataset("data", layout) + + +h5name = "demo1.h5" +writesino(h5name, omegas, dtys, filenames) # Now some tests ...: -for iy in range(0,len(dtys), 31): - print('iy',iy) - for jo in (0,1,123): +for iy in range(0, len(dtys), 31): + print("iy", iy) + for jo in (0, 1, 123): fname = filenames[iy][jo] - print(' ', fname, iy*npts + jo, end= ' ' ) - fab = fabio.open( fname ).data - h5v = h5py.File( h5name, 'r' )[ 'sinogram/data' ][ iy * npts + jo ] + print(" ", fname, iy * npts + jo, end=" ") + fab = fabio.open(fname).data + h5v = h5py.File(h5name, "r")["sinogram/data"][iy * npts + jo] # external data # h5e = h5py.File( h5name, 'r' )[ 'scans/scan%04d/data'%( iy ) ] [jo] if (h5v == fab).all(): - print('ok') + print("ok") else: - print('debug please') - print(fab.shape,fab) - print(h5v.shape,h5v) + print("debug please") + print(fab.shape, fab) + print(h5v.shape, h5v) diff --git a/ImageD11/project/make_h5_project_fails_no_external.py b/ImageD11/project/make_h5_project_fails_no_external.py index c8bc9efa..14cbe2fc 100644 --- a/ImageD11/project/make_h5_project_fails_no_external.py +++ b/ImageD11/project/make_h5_project_fails_no_external.py @@ -1,41 +1,41 @@ - - # Make a project file in a hdf. 
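# A note on the h5py feature that both of these project demos build on:
# with ``external`` storage the dataset's bytes stay in the original raw
# files, and HDF5 only records (filename, offset, nbytes) segments. A
# minimal self-contained sketch of the idea, with a made-up filename and
# shape (illustration only, not part of the patch):
import numpy as np
import h5py

raw = np.arange(12, dtype=np.uint16).reshape(3, 4)
raw.tofile("frame0000.raw")  # flat binary file, no header

with h5py.File("demo_external.h5", "w") as h:
    # one 3x4 uint16 frame = 24 bytes, starting at offset 0 in the raw file
    h.create_dataset(
        "data",
        shape=(1, 3, 4),
        dtype=np.uint16,
        external=[("frame0000.raw", 0, raw.nbytes)],
    )

with h5py.File("demo_external.h5", "r") as h:
    assert (h["data"][0] == raw).all()  # bytes come straight from the raw file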
-# example : +# example : # -import os import h5py, fabio + fmt = "/data/id11/nanoscope/Commissioning/sonja_fib_Al_z500__nobackup/difftomo_Al_y%03d_/interlaced_1_%d/Frelon/interlaced_1_%d_Frelon%04d.edf" args = "ystep", "ipass", "ipass", "fnum" -ysteps = range(-60,61) +ysteps = range(-60, 61) # interlaced frames = [] for ystep in ysteps: for fnum in range(360): # forward - frames.append( fmt % ( ystep, 1, 1, fnum ) ) + frames.append(fmt % (ystep, 1, 1, fnum)) # and back - frames.append( fmt % ( ystep, 2, 2, 359-fnum ) ) - -print("total frames:",len(frames)) + frames.append(fmt % (ystep, 2, 2, 359 - fnum)) + +print("total frames:", len(frames)) for f in frames[:10]: print(f) for f in frames[-10:]: print(f) - -im = fabio.open( frames[0] ) + +im = fabio.open(frames[0]) # hacky bsize = im._frames[0].size boffset = im._frames[0].start bshape = im.data.shape -with h5py.File("/tmp/demo.hdf" , "w" ) as h: - g = h.require_group('sinogram') - g.create_dataset( "data", - shape = (len(frames), bshape[0], bshape[1]), - dtype = im.data.dtype, - external = [(fname, boffset, bsize) for fname in frames] ) +with h5py.File("/tmp/demo.hdf", "w") as h: + g = h.require_group("sinogram") + g.create_dataset( + "data", + shape=(len(frames), bshape[0], bshape[1]), + dtype=im.data.dtype, + external=[(fname, boffset, bsize) for fname in frames], + ) # outputs: """ diff --git a/ImageD11/project/project.py b/ImageD11/project/project.py index fa304748..4960351c 100644 --- a/ImageD11/project/project.py +++ b/ImageD11/project/project.py @@ -1,6 +1,5 @@ - from __future__ import print_function, division -import collections, json +import collections """ Load and save a data analysis project @@ -23,21 +22,22 @@ """ -RotationNode = collections.namedtuple( "RotationNode", - "filename Omega" ) +RotationNode = collections.namedtuple("RotationNode", "filename Omega") + +DiffTomoNode = collections.namedtuple("DiffTomoNode", "filename Omega dty") -DiffTomoNode = collections.namedtuple( "DiffTomoNode", - "filename Omega dty" ) -class Scan( object ): - """ Scan data as a graph - nodes = list of datapoints +class Scan(object): + """Scan data as a graph + nodes = list of datapoints edges = which nodes are adjacent to this node """ - def __init__(self, - nodes = None, # a List[] - edges = None, # a List[[int,]] - ): + + def __init__( + self, + nodes=None, # a List[] + edges=None, # a List[[int,]] + ): """ nodes = measurement points in the scan edges = edges[i] = connections to point i @@ -56,11 +56,11 @@ def __init__(self, for i, node in enumerate(self.nodes): self.node_index[node] = i if __debug__: - for i, e in enumerate(self.edges): # i is implicit + for i, e in enumerate(self.edges): # i is implicit for j in e: - assert j>=0 and i= 0 and i < len(nodes) - def addNode( self, node, neighbors = [] ): + def addNode(self, node, neighbors=[]): """ Adding another node node = node to be added @@ -72,34 +72,34 @@ def addNode( self, node, neighbors = [] ): Can be for interlaced, etc Always stored as positive """ - adr = len(self.nodes) # adr = address in array - self.nodes.append( node ) + adr = len(self.nodes) # adr = address in array + self.nodes.append(node) self.node_index[node] = adr - nbs = [] # nbs = neighbors - err = 0 # bad refs given + nbs = [] # nbs = neighbors + err = 0 # bad refs given for inode in neighbors: if inode > 0 and inode < adr: neighbor = inode elif inode < 0 and inode >= -adr: neighbor = inode + adr else: - err +=1 # Error + err += 1 # Error continue - self.edges[ neighbor ].append( adr ) - nbs.append( neighbor ) - 
self.edges.append( nbs ) + self.edges[neighbor].append(adr) + nbs.append(neighbor) + self.edges.append(nbs) assert len(self.edges) == len(self.nodes) return err - def neighbors( self, node ): + def neighbors(self, node): """ node = scan point (Image, Omega, etc) node = integer """ - if isinstance( node, int ): + if isinstance(node, int): return self.edges[node] - if isinstance( node, tuple): - i = self.node_index[ node ] + if isinstance(node, tuple): + i = self.node_index[node] return self.edges[i] raise TypeError("node not understood, want tuple or int row index") @@ -107,54 +107,59 @@ def __len__(self): return len(self.nodes) def __getitem__(self, i): - """ We index to the node, not the edges ? """ + """We index to the node, not the edges ?""" return self.nodes[i] - def todict( self ): + def todict(self): """ Convert to a dictionary representation for serialisation """ - data = { "titles": self.nodes[0]._fields, - "rows" : [[field for field in node ] - for node in self.nodes], - "edges" : self.edges } - return data - - def fromdict( self, dct ): + data = { + "titles": self.nodes[0]._fields, + "rows": [[field for field in node] for node in self.nodes], + "edges": self.edges, + } + return data + + def fromdict(self, dct): """ Convert from a dictionary representation for serialisation """ - self.edges = dct['edges'] + self.edges = dct["edges"] # named tuple - tup = collections.namedtuple( "Node", dct['titles'] ) - for i, item in enumerate( dct['rows'] ): - t = tup( *item ) - self.nodes.append( t ) + tup = collections.namedtuple("Node", dct["titles"]) + for i, item in enumerate(dct["rows"]): + t = tup(*item) + self.nodes.append(t) self.node_index[t] = i - -def add_motor( inputscan, motorname, motorpos ): + +def add_motor(inputscan, motorname, motorpos): """ Adds another motor to a scan e.g. 
dty position when not in header """ fields = inputscan[0]._fields + (motorname,) - tup = collections.namedtuple( "Node", fields ) + tup = collections.namedtuple("Node", fields) try: - if len( inputscan ) == len( motorpos ): - nodes = [ tup(*(vals + (mot,))) for vals, mot in - zip(inputscan, motorpos)] + if len(inputscan) == len(motorpos): + nodes = [tup(*(vals + (mot,))) for vals, mot in zip(inputscan, motorpos)] else: raise Exception("Motorpos has wrong length") - except TypeError: # no len - mot = float( motorpos ) - nodes = [ tup( *(vals + (mot,))) for vals in inputscan ] + except TypeError: # no len + mot = float(motorpos) + nodes = [tup(*(vals + (mot,))) for vals in inputscan] edges = [e for e in inputscan.edges] - return Scan( nodes, edges ) + return Scan(nodes, edges) -def fablescandata_from_spt( spt, headeritems=["Omega",], - nodeclass = RotationNode ): +def fablescandata_from_spt( + spt, + headeritems=[ + "Omega", + ], + nodeclass=RotationNode, +): """ Read an ImageD11 peaksearch spt file to return a Scan object """ @@ -163,11 +168,11 @@ def fablescandata_from_spt( spt, headeritems=["Omega",], motor_mne = counter_mne = [] with open(spt, "r") as sptfile: for line in sptfile.readlines(): - if line[0] != "#": # skip blank and peaks + if line[0] != "#": # skip blank and peaks continue # 0123456 - if line.find("# File ")==0: - filename = line[7:-1] # trim newline + if line.find("# File ") == 0: + filename = line[7:-1] # trim newline header = {} continue if line.find("# motor_mne") == 0: @@ -175,35 +180,35 @@ def fablescandata_from_spt( spt, headeritems=["Omega",], continue if line.find("# motor_pos") == 0: motor_pos = [float(v) for v in line.split("=")[1].split()] - for k,v in zip( motor_mne, motor_pos ): - header[ k ] = v + for k, v in zip(motor_mne, motor_pos): + header[k] = v continue if line.find("# counter_mne") == 0: counter_mne = line.split("=")[1].split() continue if line.find("# counter_pos") == 0: counter_pos = [float(v) for v in line.split("=")[1].split()] - for k,v in zip( counter_mne, counter_pos ): - header[ k ] = v + for k, v in zip(counter_mne, counter_pos): + header[k] = v continue if line.find("# npks") == 0: - args = { key:float(header[key]) for key in headeritems } - mynode = nodeclass( filename=filename, **args ) - myscan.addNode( mynode, (-1,) ) + args = {key: float(header[key]) for key in headeritems} + mynode = nodeclass(filename=filename, **args) + myscan.addNode(mynode, (-1,)) continue for hitem in headeritems: if line.find(hitem) > 0: - val = line[1:].split( "=" )[-1].strip() - header[ hitem ] = val + val = line[1:].split("=")[-1].strip() + header[hitem] = val # end of frame marker return myscan -def sinogram_from_spt_list( sptlist ): - scans = [fablescandata_from_spt( sptfile ) for sptfile in sptlist ] +def sinogram_from_spt_list(sptlist): + scans = [fablescandata_from_spt(sptfile) for sptfile in sptlist] -def mergeScans( scan1, scan2 ): +def mergeScans(scan1, scan2): """ Merge two scans together """ @@ -211,11 +216,11 @@ def mergeScans( scan1, scan2 ): n2 = len(scan2) # "+" joins lists: nodes = scan1.nodes + scan2.nodes - edges = scan1.edges + [ (i + n1, j + n1) for i,j in nodes ] - return Scan( nodes, edges ) - + edges = scan1.edges + [(i + n1, j + n1) for i, j in nodes] + return Scan(nodes, edges) -class Project( object ): + +class Project(object): """ Holds all the information about the project @@ -224,13 +229,13 @@ class Project( object ): Mostly links to external files ? 
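    A hedged usage sketch (load/save below are still stubs; the Scan
    would come from fablescandata_from_spt above, and "peaks.spt" is a
    hypothetical filename):

        p = Project()
        p.Scans.append(fablescandata_from_spt("peaks.spt"))
        p.processing.append("peaksearch")  # free-form processing step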
""" + def __init__(self): self.Scans = [] self.processing = [] - + def load(self): pass - + def save(self): pass - diff --git a/ImageD11/project/project_yaml.py b/ImageD11/project/project_yaml.py index 3f77e044..82afed6f 100644 --- a/ImageD11/project/project_yaml.py +++ b/ImageD11/project/project_yaml.py @@ -1,4 +1,3 @@ - from __future__ import print_function import yaml, pprint @@ -164,39 +163,33 @@ """ - y = yaml.load(my_yml) pprint.pprint(y) - - -class ImageD11Project( object ): - - def __init__(self, project_dict = None, filename=None ): - """ Takes filename or existing project dictionary """ - self.project_dict = { } +class ImageD11Project(object): + def __init__(self, project_dict=None, filename=None): + """Takes filename or existing project dictionary""" + self.project_dict = {} if filename is not None: - self.load( filename ) + self.load(filename) if project_dict is not None: - self.project_dict.update( project_dict ) + self.project_dict.update(project_dict) def load(self, filename): - """ Updates the existing project from the file (e.g. adds and overwrites) """ + """Updates the existing project from the file (e.g. adds and overwrites)""" with open(filename, "r") as stream: - pd = yaml.load( stream ) - self.project_dict.update( pd ) - + pd = yaml.load(stream) + self.project_dict.update(pd) + def save(self, filename): - """ Writes the current project to a file """ + """Writes the current project to a file""" with open(filename, "w") as stream: - stream.write( yaml.dump( self.project_dict, default_flow_style=False, encoding='utf-8' ) ) + stream.write( + yaml.dump(self.project_dict, default_flow_style=False, encoding="utf-8") + ) def validate(self): - """ Check that we have what we need in the dictionary - TODO - try to use schema from nexus or cif or something ? """ + """Check that we have what we need in the dictionary + TODO - try to use schema from nexus or cif or something ?""" pass - - - - diff --git a/ImageD11/project/projects.py b/ImageD11/project/projects.py index 76474540..a9479904 100644 --- a/ImageD11/project/projects.py +++ b/ImageD11/project/projects.py @@ -1,4 +1,3 @@ - """ImageD11 project files We want to be able to: @@ -57,4 +56,4 @@ - Concept of "previous" and "next" images in scan - Estimate background (single image method, file series method) -""" \ No newline at end of file +""" diff --git a/ImageD11/project/test_json.py b/ImageD11/project/test_json.py index 353479eb..1e17a65e 100644 --- a/ImageD11/project/test_json.py +++ b/ImageD11/project/test_json.py @@ -1,4 +1,3 @@ - from __future__ import print_function import yaml, pprint diff --git a/ImageD11/rc_array.py b/ImageD11/rc_array.py index c279079b..9d465408 100644 --- a/ImageD11/rc_array.py +++ b/ImageD11/rc_array.py @@ -1,4 +1,3 @@ - from __future__ import print_function """ @@ -10,36 +9,41 @@ peaks equally well and in a coherent way. 
""" -from numpy import dot, round_, array, allclose, asarray, fabs,\ - argmin, argmax, sqrt, argsort, take, sum, where, ndarray, eye,\ - zeros, cross -from numpy.linalg import inv, LinAlgError +from numpy import ( + dot, + array, + allclose, + sum, + ndarray, + eye, + zeros, +) +from numpy.linalg import inv # Confirm that dot'ting a 3x3 matrix with a 3x10 gives a 3x10 -assert dot(eye(3), zeros( (3, 10) ) ).shape == (3, 10), \ - "Numpy dot insanity problem" - +assert dot(eye(3), zeros((3, 10))).shape == (3, 10), "Numpy dot insanity problem" + # It is unclear why it is not a 10x3 result (row/col vectors) try: - dot(eye(3), zeros( (10, 3) ) ) + dot(eye(3), zeros((10, 3))) raise Exception("Numpy dot insanity problem") -except ValueError: +except ValueError: pass except: print("Unexpected exception when checking numpy behaviour") raise - # Based on http://www.scipy.org/Subclasses class rc_array(ndarray): - """ + """ Row/Column array Represent a list of row or column vectors """ + def __new__(subtype, data, direction=None, dtype=None, copy=False): - """ + """ Mostly as from example direction is one of row / column """ @@ -47,24 +51,21 @@ def __new__(subtype, data, direction=None, dtype=None, copy=False): subarr = subarr.view(subtype) if direction is not None: subarr.direction = direction - elif hasattr(data, 'info'): + elif hasattr(data, "info"): subarr.direction = data.direction return subarr - + def __array_finalize__(self, obj): - """ + """ Fill in a default row arg to direction self/obj?? """ - self.direction = getattr(obj, 'direction', 'row' ) - + self.direction = getattr(obj, "direction", "row") def __str__(self): - """ Used for printing """ - desc = \ -"""{%(data)s in %(direction)s direction}""" - return desc % { 'data': super.__str__(self), - 'direction' : self.direction } + """Used for printing""" + desc = """{%(data)s in %(direction)s direction}""" + return desc % {"data": super.__str__(self), "direction": self.direction} def __iter__(self): """ @@ -72,43 +73,42 @@ def __iter__(self): Use to get [ v for v in rr_array ] """ # print "iter called" - if self.direction == 'row': + if self.direction == "row": return ndarray.__iter__(self) - elif self.direction == 'col': + elif self.direction == "col": return ndarray.__iter__(self.T) else: raise Exception("rc_array with direction not in row|col") def norm2(self): - """ sum(v*v,axis=? for row or col """ - return sum( self*self, axis=self.vector_axis()) + """sum(v*v,axis=? 
for row or col""" + return sum(self * self, axis=self.vector_axis()) def vector_axis(self): - """ The axis which has the 3 on it """ - return ['col', 'row'].index(self.direction) + """The axis which has the 3 on it""" + return ["col", "row"].index(self.direction) def nb_vector_axis(self): - """ The axis which has the n on it """ - return ['row', 'col'].index(self.direction) + """The axis which has the n on it""" + return ["row", "col"].index(self.direction) def other_direction(self): - """ The one which is not self.direction - """ - return ['row', 'col'][self.vector_axis()] + """The one which is not self.direction""" + return ["row", "col"][self.vector_axis()] def check(self): - """ - Ensure we have an rc_array which is well behaved + """ + Ensure we have an rc_array which is well behaved Pattern assert(check(v)) should disappear in optimiser """ - assert hasattr(self, 'direction') - assert self.direction in ['row', 'col'] + assert hasattr(self, "direction") + assert self.direction in ["row", "col"] if len(self.shape) == 1: assert self.shape[0] == 3, str(self) elif len(self.shape) == 2: - if self.direction == 'row': + if self.direction == "row": assert self.shape[1] == 3 - if self.direction == 'col': + if self.direction == "col": assert self.shape[0] == 3 else: raise Exception("Only 1D or 2D rc_arrays allowed so far") @@ -123,45 +123,43 @@ def flip(self, mat): """ assert self.check() assert mat.shape == (3, 3) - if self.direction == 'row': - ret = dot( mat , self.T) - if self.direction == 'col': - ret = dot( mat, self).T - ret = rc_array( ret, direction = self.other_direction()) + if self.direction == "row": + ret = dot(mat, self.T) + if self.direction == "col": + ret = dot(mat, self).T + ret = rc_array(ret, direction=self.other_direction()) ret.check() - assert ret.shape == self.shape[::-1],"Shape mismatch in flip" + assert ret.shape == self.shape[::-1], "Shape mismatch in flip" return ret def inv(self): """ Inverse matrix of self """ - assert self.shape == (3,3) + assert self.shape == (3, 3) ret = inv(self) return rc_array(ret, self.other_direction()) -if __name__=="__main__": - - v = rc_array([1,2,3],direction='row') - assert v.other_direction() == 'col' +if __name__ == "__main__": - v = rc_array([[1,2,3],[4,5,6]],direction='row') + v = rc_array([1, 2, 3], direction="row") + assert v.other_direction() == "col" - assert v.other_direction() == 'col' - assert v.flip(eye(3)).direction == 'col' - assert allclose( v.flip(eye(3)) , v.T ) - assert v.norm2().shape == ( v.shape[ v.nb_vector_axis() ] ,) - assert allclose( v.norm2(), array([ 1+4+9, 16+25+36 ])), str(v) - + v = rc_array([[1, 2, 3], [4, 5, 6]], direction="row") - v = rc_array(array( [[1,2,3],[4,5,6]] ).T ,direction='col') + assert v.other_direction() == "col" + assert v.flip(eye(3)).direction == "col" + assert allclose(v.flip(eye(3)), v.T) + assert v.norm2().shape == (v.shape[v.nb_vector_axis()],) + assert allclose(v.norm2(), array([1 + 4 + 9, 16 + 25 + 36])), str(v) - assert v.other_direction() == 'row' - assert v.flip(eye(3)).direction == 'row' - assert allclose( v.flip(eye(3)) , v.T ) + v = rc_array(array([[1, 2, 3], [4, 5, 6]]).T, direction="col") - assert v.norm2().shape == ( v.shape[ v.nb_vector_axis() ] ,) + assert v.other_direction() == "row" + assert v.flip(eye(3)).direction == "row" + assert allclose(v.flip(eye(3)), v.T) - assert allclose( v.norm2(), array([ 1+4+9, 16+25+36 ])), str(v) + assert v.norm2().shape == (v.shape[v.nb_vector_axis()],) + assert allclose(v.norm2(), array([1 + 4 + 9, 16 + 25 + 36])), str(v) diff --git 
a/ImageD11/refinegrains.py b/ImageD11/refinegrains.py index 87080748..9e883e16 100644 --- a/ImageD11/refinegrains.py +++ b/ImageD11/refinegrains.py @@ -1,4 +1,3 @@ - from __future__ import print_function, division # Automatically adapted for numpy.oldnumeric Sep 06, 2007 by alter_codepy @@ -24,110 +23,165 @@ import numpy -from ImageD11 import transform, indexing, parameters, ImageD11options +from ImageD11 import transform, parameters, ImageD11options from ImageD11 import grain, columnfile, cImageD11, simplex import xfab.tools # print(__file__) - - -def triclinic( cp ): +def triclinic(cp): return cp -def monoclinic_a( cp ): - a,b,c,al,be,ga = cp - return [a,b,c,al,90.,90.] - -def monoclinic_b( cp ): - a,b,c,al,be,ga = cp - return [a,b,c,90.,be,90.] - -def monoclinic_c( cp ): - a,b,c,al,be,ga = cp - return [a,b,c,90.,90.,ga] - -def orthorhombic( cp ): - """ a=b, c, 90,90,90 """ - a,b,c,al,be,ga = cp - return [ a, b, c, 90., 90., 90. ] - -def tetragonal( cp ): - """ a=b, c, 90,90,90 """ - a,b,c,al,be,ga = cp - return [ (a+b)/2., (a+b)/2., c, 90., 90., 90. ] - -def trigonalP( cp ): - """ a=b=c, alpha=beta=gamma """ - a,b,c,al,be,ga = cp - anew = (a+b+c)/3. - alnew = (al+be+ga)/3. - return [anew,anew,anew,alnew,alnew,alnew] - -def trigonalH( cp ): - """ a=b,c, alpha=beta=90,gamma=120 """ - a,b,c,al,be,ga = cp - anew = (a+b)/2. - return [ anew, anew, c, 90., 90., 120.] + +def monoclinic_a(cp): + a, b, c, al, be, ga = cp + return [a, b, c, al, 90.0, 90.0] + + +def monoclinic_b(cp): + a, b, c, al, be, ga = cp + return [a, b, c, 90.0, be, 90.0] + + +def monoclinic_c(cp): + a, b, c, al, be, ga = cp + return [a, b, c, 90.0, 90.0, ga] + + +def orthorhombic(cp): + """a=b, c, 90,90,90""" + a, b, c, al, be, ga = cp + return [a, b, c, 90.0, 90.0, 90.0] + + +def tetragonal(cp): + """a=b, c, 90,90,90""" + a, b, c, al, be, ga = cp + return [(a + b) / 2.0, (a + b) / 2.0, c, 90.0, 90.0, 90.0] + + +def trigonalP(cp): + """a=b=c, alpha=beta=gamma""" + a, b, c, al, be, ga = cp + anew = (a + b + c) / 3.0 + alnew = (al + be + ga) / 3.0 + return [anew, anew, anew, alnew, alnew, alnew] + + +def trigonalH(cp): + """a=b,c, alpha=beta=90,gamma=120""" + a, b, c, al, be, ga = cp + anew = (a + b) / 2.0 + return [anew, anew, c, 90.0, 90.0, 120.0] + hexagonal = trigonalH -def cubic( cp ): - """ a=b=c, alpha=beta=gamma=90 """ - anew = (cp[0]+cp[1]+cp[2])/3. - return [ anew, anew, anew, 90., 90., 90.] 
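# The cell-constraint helpers above share one convention: take a free
# triclinic cell [a, b, c, alpha, beta, gamma] and return the nearest cell
# obeying the named symmetry, averaging the lengths and angles that the
# symmetry ties together and pinning the fixed angles. An illustration
# with made-up numbers:
#
#     cubic([4.04, 4.06, 4.05, 89.9, 90.1, 90.0])
#     # -> [4.05, 4.05, 4.05, 90.0, 90.0, 90.0]
#
#     monoclinic_b([5.0, 6.0, 7.0, 89.8, 95.0, 90.2])
#     # -> [5.0, 6.0, 7.0, 90.0, 95.0, 90.0]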
- + +def cubic(cp): + """a=b=c, alpha=beta=gamma=90""" + anew = (cp[0] + cp[1] + cp[2]) / 3.0 + return [anew, anew, anew, 90.0, 90.0, 90.0] + def get_options(parser): - parser.add_argument("-p", "--parfile", action="store", - dest="parfile", - type=ImageD11options.ParameterFileType(mode='r'), - help="Name of input parameter file") - parser.add_argument("-u", "--ubifile", action="store", - dest="ubifile", - type=ImageD11options.UbiFileType(mode='r'), - help="Name of ubi file") - parser.add_argument("-U", "--newubifile", action="store", - dest="newubifile", - type=ImageD11options.UbiFileType(mode='w'), - help="Name of new ubi file to output") - parser.add_argument("-f", "--fltfile", action="store", - dest="fltfile", - type=ImageD11options.ColumnFileType(mode='r'), - help="Name of flt file") - parser.add_argument("-F", "--newfltfile", action="store", - dest="newfltfile", - type=ImageD11options.ColumnFileType(mode='w'), - help="Name of flt file containing unindexed peaks") - lattices = ["cubic", "hexagonal", "trigonalH","trigonalP", - "tetragonal", "orthorhombic", "monoclinic_a", - "monoclinic_b","monoclinic_c","triclinic"] - parser.add_argument("-s", "--sym", action="store", - dest="symmetry", # type="choice", - default = "triclinic", - choices = lattices, - help="Lattice symmetry for choosing orientation") - parser.add_argument("-l", "--lattice", action="store", - dest="latticesymmetry", #type="choice", - default = "triclinic", - choices = lattices, - help="Lattice symmetry for choosing orientation from "+ - "|".join(lattices)) - parser.add_argument("-t", "--tol", action="store", - dest="tol", type=float, - default = 0.25, - help="Tolerance to use in peak assignment, default=%f"%(0.25)) - parser.add_argument( "--omega_no_float", action="store_false", - dest = "omega_float", - default = True, - help= "Use exact observed omega values") - - parser.add_argument( "--omega_slop", action="store", type=float, - dest = "omega_slop", - default = 0.5, - help= "Omega slop (step) size") + parser.add_argument( + "-p", + "--parfile", + action="store", + dest="parfile", + type=ImageD11options.ParameterFileType(mode="r"), + help="Name of input parameter file", + ) + parser.add_argument( + "-u", + "--ubifile", + action="store", + dest="ubifile", + type=ImageD11options.UbiFileType(mode="r"), + help="Name of ubi file", + ) + parser.add_argument( + "-U", + "--newubifile", + action="store", + dest="newubifile", + type=ImageD11options.UbiFileType(mode="w"), + help="Name of new ubi file to output", + ) + parser.add_argument( + "-f", + "--fltfile", + action="store", + dest="fltfile", + type=ImageD11options.ColumnFileType(mode="r"), + help="Name of flt file", + ) + parser.add_argument( + "-F", + "--newfltfile", + action="store", + dest="newfltfile", + type=ImageD11options.ColumnFileType(mode="w"), + help="Name of flt file containing unindexed peaks", + ) + lattices = [ + "cubic", + "hexagonal", + "trigonalH", + "trigonalP", + "tetragonal", + "orthorhombic", + "monoclinic_a", + "monoclinic_b", + "monoclinic_c", + "triclinic", + ] + parser.add_argument( + "-s", + "--sym", + action="store", + dest="symmetry", # type="choice", + default="triclinic", + choices=lattices, + help="Lattice symmetry for choosing orientation", + ) + parser.add_argument( + "-l", + "--lattice", + action="store", + dest="latticesymmetry", # type="choice", + default="triclinic", + choices=lattices, + help="Lattice symmetry for choosing orientation from " + "|".join(lattices), + ) + parser.add_argument( + "-t", + "--tol", + action="store", + 
dest="tol", + type=float, + default=0.25, + help="Tolerance to use in peak assignment, default=%f" % (0.25), + ) + parser.add_argument( + "--omega_no_float", + action="store_false", + dest="omega_float", + default=True, + help="Use exact observed omega values", + ) + + parser.add_argument( + "--omega_slop", + action="store", + type=float, + dest="omega_slop", + default=0.5, + help="Omega slop (step) size", + ) return parser @@ -140,73 +194,78 @@ class refinegrains: """ # Default parameters - pars = {"cell__a" : 4.1569162, - "cell__b" : 4.1569162, - "cell__c" : 4.1569162, - "cell_alpha" : 90.0, - "cell_beta" : 90.0, - "cell_gamma" : 90.0, - "cell_lattice_[P,A,B,C,I,F,R]" : 'P', - "chi" : 0.0, - "distance" : 7367.8452302, - "fit_tolerance" : 0.5, - "o11" : 1, - "o12" : 0, - "o21" : 0, - "o22" : 1, - "omegasign" : 1, - "t_x" : 33.0198146824, - "t_y" : 14.6384893741, - "t_z" : 0.0 , - "tilt_x" : 0.0, - "tilt_y" : 0.0623952920101, - "tilt_z" : 0.00995011461696, - "wavelength" : 0.2646, - "wedge" : 0.0, - "y_center" : 732.950204632, - "y_size" : 4.6, - "z_center" : 517.007049626, - "z_size" : 4.6 } + pars = { + "cell__a": 4.1569162, + "cell__b": 4.1569162, + "cell__c": 4.1569162, + "cell_alpha": 90.0, + "cell_beta": 90.0, + "cell_gamma": 90.0, + "cell_lattice_[P,A,B,C,I,F,R]": "P", + "chi": 0.0, + "distance": 7367.8452302, + "fit_tolerance": 0.5, + "o11": 1, + "o12": 0, + "o21": 0, + "o22": 1, + "omegasign": 1, + "t_x": 33.0198146824, + "t_y": 14.6384893741, + "t_z": 0.0, + "tilt_x": 0.0, + "tilt_y": 0.0623952920101, + "tilt_z": 0.00995011461696, + "wavelength": 0.2646, + "wedge": 0.0, + "y_center": 732.950204632, + "y_size": 4.6, + "z_center": 517.007049626, + "z_size": 4.6, + } # Default stepsizes for the stepsizes = { - "wavelength" : 0.001, - 'y_center' : 0.2, - 'z_center' : 0.2, - 'distance' : 200., - 'tilt_y' : transform.radians(0.1), - 'tilt_z' : transform.radians(0.1), - 'tilt_x' : transform.radians(0.1), - 'wedge' : transform.radians(0.1), - 'chi' : transform.radians(0.1), - 't_x' : 0.2, - 't_y' : 0.2, - 't_z' : 0.2, - 'y_size' : 0.2, - 'z_size' : 0.2, - } - - def __init__(self, tolerance = 0.01, intensity_tth_range = (6.1, 6.3), - latticesymmetry = triclinic, - OmFloat=True, OmSlop=0.25 ): - """ - - """ - self.OMEGA_FLOAT=OmFloat - self.slop=OmSlop + "wavelength": 0.001, + "y_center": 0.2, + "z_center": 0.2, + "distance": 200.0, + "tilt_y": transform.radians(0.1), + "tilt_z": transform.radians(0.1), + "tilt_x": transform.radians(0.1), + "wedge": transform.radians(0.1), + "chi": transform.radians(0.1), + "t_x": 0.2, + "t_y": 0.2, + "t_z": 0.2, + "y_size": 0.2, + "z_size": 0.2, + } + + def __init__( + self, + tolerance=0.01, + intensity_tth_range=(6.1, 6.3), + latticesymmetry=triclinic, + OmFloat=True, + OmSlop=0.25, + ): + """ """ + self.OMEGA_FLOAT = OmFloat + self.slop = OmSlop if self.OMEGA_FLOAT: - print("Using",self.slop,"degree slop") + print("Using", self.slop, "degree slop") else: print("Omega is used as observed") - self.tolerance=tolerance + self.tolerance = tolerance # list of ubi matrices (1 for each grain in each scan) self.grainnames = [] self.ubisread = {} self.translationsread = {} # list of scans and corresponding data - self.scannames=[] - self.scantitles={} - self.scandata={} + self.scannames = [] + self.scantitles = {} + self.scandata = {} # grains in each scan self.grains = {} self.grains_to_refine = [] @@ -216,8 +275,8 @@ def __init__(self, tolerance = 0.01, intensity_tth_range = (6.1, 6.3), self.parameterobj = parameters.parameters(**self.pars) 
self.intensity_tth_range = intensity_tth_range self.recompute_xlylzl = False - for k,s in list(self.stepsizes.items()): - self.parameterobj.stepsizes[k]=s + for k, s in list(self.stepsizes.items()): + self.parameterobj.stepsizes[k] = s def loadparameters(self, filename): self.parameterobj.loadparameters(filename) @@ -241,7 +300,7 @@ def readubis(self, filename): self.grainnames.append(i) self.ubisread[name] = g.ubi self.translationsread[name] = g.translation - #print "Grain names",self.grainnames + # print "Grain names",self.grainnames def savegrains(self, filename, sort_npks=True): """ @@ -252,39 +311,41 @@ def savegrains(self, filename, sort_npks=True): # sort by number of peaks indexed to write out if sort_npks: # npks in x array - order = numpy.argsort( [self.grains[k].npks for k in ks ]) + order = numpy.argsort([self.grains[k].npks for k in ks]) ks = [ks[i] for i in order[::-1]] else: ks.sort() - gl = [ (self.grains[k],k) for k in ks ] + gl = [(self.grains[k], k) for k in ks] # Update the datafile and grain names reflect indices in grain list - for g,k in gl: + for g, k in gl: name, fltname = g.name.split(":") - assert fltname in self.scandata,"Sorry - logical flaw" - assert len(list(self.scandata.keys()))==1,"Sorry - need to fix for multi data" - self.set_translation(k[0],fltname) - self.compute_gv( g , update_columns = True ) - numpy.put( self.scandata[fltname].gx, g.ind, self.gv[:,0] ) - numpy.put( self.scandata[fltname].gy, g.ind, self.gv[:,1] ) - numpy.put( self.scandata[fltname].gz, g.ind, self.gv[:,2] ) + assert fltname in self.scandata, "Sorry - logical flaw" + assert ( + len(list(self.scandata.keys())) == 1 + ), "Sorry - need to fix for multi data" + self.set_translation(k[0], fltname) + self.compute_gv(g, update_columns=True) + numpy.put(self.scandata[fltname].gx, g.ind, self.gv[:, 0]) + numpy.put(self.scandata[fltname].gy, g.ind, self.gv[:, 1]) + numpy.put(self.scandata[fltname].gz, g.ind, self.gv[:, 2]) hkl_real = numpy.dot(g.ubi, self.gv.T) - numpy.put( self.scandata[fltname].hr, g.ind, hkl_real[0,:] ) - numpy.put( self.scandata[fltname].kr, g.ind, hkl_real[1,:] ) - numpy.put( self.scandata[fltname].lr, g.ind, hkl_real[2,:] ) + numpy.put(self.scandata[fltname].hr, g.ind, hkl_real[0, :]) + numpy.put(self.scandata[fltname].kr, g.ind, hkl_real[1, :]) + numpy.put(self.scandata[fltname].lr, g.ind, hkl_real[2, :]) hkl = numpy.floor(hkl_real + 0.5) - numpy.put( self.scandata[fltname].h, g.ind, hkl[0,:] ) - numpy.put( self.scandata[fltname].k, g.ind, hkl[1,:] ) - numpy.put( self.scandata[fltname].l, g.ind, hkl[2,:] ) + numpy.put(self.scandata[fltname].h, g.ind, hkl[0, :]) + numpy.put(self.scandata[fltname].k, g.ind, hkl[1, :]) + numpy.put(self.scandata[fltname].l, g.ind, hkl[2, :]) # Count "uniq" reflections... - sign_eta = numpy.sign( self.scandata[fltname].eta_per_grain[g.ind] ) - uniq_list = [ (int(h),int(k),int(l),int(s)) for - (h,k,l),s in zip( hkl.T, sign_eta) ] - g.nuniq = len( set(uniq_list ) ) + sign_eta = numpy.sign(self.scandata[fltname].eta_per_grain[g.ind]) + uniq_list = [ + (int(h), int(k), int(l), int(s)) + for (h, k, l), s in zip(hkl.T, sign_eta) + ] + g.nuniq = len(set(uniq_list)) grain.write_grain_file(filename, [g[0] for g in gl]) - - def makeuniq(self, symmetry): """ Flip orientations to a particular choice @@ -293,12 +354,12 @@ def makeuniq(self, symmetry): you might have problems... 
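        In outline, using the same ImageD11.sym_u calls as the body
        below ("cubic" here is just an example group name):

            grp = getgroup("cubic")()        # symmetry operator group
            ubi_new = find_uniq_u(ubi, grp)  # rotated to a standard choice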
""" from ImageD11.sym_u import find_uniq_u, getgroup - g = getgroup( symmetry )() - for k in list(self.ubisread.keys()): + + g = getgroup(symmetry)() + for k in list(self.ubisread.keys()): self.ubisread[k] = find_uniq_u(self.ubisread[k], g) for k in list(self.grains.keys()): - self.grains[k].set_ubi( find_uniq_u(self.grains[k].ubi, g) ) - + self.grains[k].set_ubi(find_uniq_u(self.grains[k].ubi, g)) def loadfiltered(self, filename): """ @@ -307,40 +368,42 @@ def loadfiltered(self, filename): col = columnfile.columnfile(filename) self.scannames.append(filename) self.scantitles[filename] = col.titles - if not "drlv2" in col.titles: - col.addcolumn( numpy.ones(col.nrows, float), - "drlv2" ) - if not "labels" in col.titles: - col.addcolumn( numpy.ones(col.nrows, float)-2, - "labels" ) - if not "sc" in col.titles: + if "drlv2" not in col.titles: + col.addcolumn(numpy.ones(col.nrows, float), "drlv2") + if "labels" not in col.titles: + col.addcolumn(numpy.ones(col.nrows, float) - 2, "labels") + if "sc" not in col.titles: assert "xc" in col.titles - col.addcolumn( col.xc.copy(), "sc") - if not "fc" in col.titles: + col.addcolumn(col.xc.copy(), "sc") + if "fc" not in col.titles: assert "yc" in col.titles - col.addcolumn( col.yc.copy(), "fc") + col.addcolumn(col.yc.copy(), "fc") self.scandata[filename] = col - def generate_grains(self): - t = numpy.array([ self.parameterobj.parameters[s] - for s in ['t_x', 't_y','t_z']] ) + t = numpy.array( + [self.parameterobj.parameters[s] for s in ["t_x", "t_y", "t_z"]] + ) for grainname in self.grainnames: for scanname in self.scannames: try: - gr = self.grains[(grainname,scanname)] + _ = self.grains[(grainname, scanname)] except KeyError: if self.translationsread[grainname] is None: - self.grains[(grainname,scanname)] = grain.grain( - self.ubisread[grainname], translation = t ) - self.grains[(grainname,scanname)].name = \ - (str(grainname)+":"+scanname).replace(" ","_") + self.grains[(grainname, scanname)] = grain.grain( + self.ubisread[grainname], translation=t + ) + self.grains[(grainname, scanname)].name = ( + str(grainname) + ":" + scanname + ).replace(" ", "_") else: - self.grains[(grainname,scanname)] = grain.grain( + self.grains[(grainname, scanname)] = grain.grain( self.ubisread[grainname], - translation = self.translationsread[grainname] ) - self.grains[(grainname,scanname)].name = \ - (str(grainname)+":"+scanname).replace(" ","_") + translation=self.translationsread[grainname], + ) + self.grains[(grainname, scanname)].name = ( + str(grainname) + ":" + scanname + ).replace(" ", "_") for scanname in self.scannames: self.reset_labels(scanname) @@ -354,237 +417,232 @@ def reset_labels(self, scanname): y = self.scandata[scanname].fc om = self.scandata[scanname].omega # only for this grain - self.scandata[scanname].labels = self.scandata[scanname].labels*0 - 2 - self.scandata[scanname].drlv2 = self.scandata[scanname].drlv2*0 + 1 + self.scandata[scanname].labels = self.scandata[scanname].labels * 0 - 2 + self.scandata[scanname].drlv2 = self.scandata[scanname].drlv2 * 0 + 1 for g in self.grainnames: - self.grains[(g,scanname)].x = x - self.grains[(g,scanname)].y = y - self.grains[(g,scanname)].om = om - - + self.grains[(g, scanname)].x = x + self.grains[(g, scanname)].y = y + self.grains[(g, scanname)].om = om - def compute_gv(self, thisgrain , update_columns = False ): + def compute_gv(self, thisgrain, update_columns=False): """ Makes self.gv refer be g-vectors computed for this grain in this scan """ - peaks_xyz = thisgrain.peaks_xyz + peaks_xyz = 
thisgrain.peaks_xyz om = thisgrain.om try: - sign = self.parameterobj.parameters['omegasign'] + sign = self.parameterobj.parameters["omegasign"] except: sign = 1.0 # translation should match grain translation here... - self.tth,self.eta = transform.compute_tth_eta_from_xyz( peaks_xyz.T, - omega = om * sign, - **self.parameterobj.parameters) - - gv = transform.compute_g_vectors(self.tth, self.eta, om*sign, - float(self.parameterobj.parameters['wavelength']), - self.parameterobj.parameters['wedge'], - self.parameterobj.parameters['chi']) + self.tth, self.eta = transform.compute_tth_eta_from_xyz( + peaks_xyz.T, omega=om * sign, **self.parameterobj.parameters + ) + + gv = transform.compute_g_vectors( + self.tth, + self.eta, + om * sign, + float(self.parameterobj.parameters["wavelength"]), + self.parameterobj.parameters["wedge"], + self.parameterobj.parameters["chi"], + ) if self.OMEGA_FLOAT: mat = thisgrain.ubi.copy() gvT = numpy.ascontiguousarray(gv.T) - junk = cImageD11.score_and_refine(mat , gvT, - self.tolerance) - hklf = numpy.dot( mat, gv ) - hkli = numpy.round( hklf ) - - gcalc = numpy.dot( numpy.linalg.inv(mat) , hkli ) - tth,[eta1,eta2],[omega1,omega2] = transform.uncompute_g_vectors( - gcalc , float(self.parameterobj.parameters['wavelength']), - self.parameterobj.parameters['wedge'], - self.parameterobj.parameters['chi']) + _ = cImageD11.score_and_refine(mat, gvT, self.tolerance) + hklf = numpy.dot(mat, gv) + hkli = numpy.round(hklf) + + gcalc = numpy.dot(numpy.linalg.inv(mat), hkli) + tth, [eta1, eta2], [omega1, omega2] = transform.uncompute_g_vectors( + gcalc, + float(self.parameterobj.parameters["wavelength"]), + self.parameterobj.parameters["wedge"], + self.parameterobj.parameters["chi"], + ) e1e = numpy.abs(eta1 - self.eta) e2e = numpy.abs(eta2 - self.eta) try: - eta_err = numpy.array( [ e1e, e2e ] ) + eta_err = numpy.array([e1e, e2e]) except: print(e1e.shape, e2e.shape, e1e) raise - best_fitting = numpy.argmin( eta_err, axis = 0 ) + best_fitting = numpy.argmin(eta_err, axis=0) # These are always 1 or zero # pick the right omega (confuddled by take here) - omega_calc = best_fitting * omega2 + ( 1 - best_fitting ) * omega1 + omega_calc = best_fitting * omega2 + (1 - best_fitting) * omega1 # Take a weighted average within the omega error of the observed - omerr = (om*sign - omega_calc) + omerr = om * sign - omega_calc # Clip to 360 degree range - omerr = omerr - ( 360 * numpy.round( omerr / 360.0 ) ) + omerr = omerr - (360 * numpy.round(omerr / 360.0)) # print omerr[0:5] - omega_calc = om*sign - numpy.clip( omerr, -self.slop , self.slop ) + omega_calc = om * sign - numpy.clip(omerr, -self.slop, self.slop) # print omega_calc[0], om[0] thisgrain.omega_calc = omega_calc # Now recompute with improved omegas... 
(tth, eta do not change much) - #self.tth, self.eta = transform.compute_tth_eta( + # self.tth, self.eta = transform.compute_tth_eta( # numpy.array([x, y]), # omega = omega_calc, # **self.parameterobj.parameters) - self.tth,self.eta = transform.compute_tth_eta_from_xyz( peaks_xyz.T, - omega = om * sign, - **self.parameterobj.parameters) - - gv = transform.compute_g_vectors(self.tth, self.eta, omega_calc, - float(self.parameterobj.parameters['wavelength']), - self.parameterobj.parameters['wedge'], - self.parameterobj.parameters['chi']) - + self.tth, self.eta = transform.compute_tth_eta_from_xyz( + peaks_xyz.T, omega=om * sign, **self.parameterobj.parameters + ) + + gv = transform.compute_g_vectors( + self.tth, + self.eta, + omega_calc, + float(self.parameterobj.parameters["wavelength"]), + self.parameterobj.parameters["wedge"], + self.parameterobj.parameters["chi"], + ) + else: - thisgrain.omega_calc[:]=0 + thisgrain.omega_calc[:] = 0 # update tth_per_grain and eta_per_grain if update_columns: name = thisgrain.name.split(":")[1] - numpy.put( self.scandata[name].tth_per_grain, - thisgrain.ind, - self.tth) - numpy.put( self.scandata[name].eta_per_grain , - thisgrain.ind, - self.eta) + numpy.put(self.scandata[name].tth_per_grain, thisgrain.ind, self.tth) + numpy.put(self.scandata[name].eta_per_grain, thisgrain.ind, self.eta) if self.OMEGA_FLOAT: - numpy.put( self.scandata[name].omegacalc_per_grain , - thisgrain.ind, - omega_calc) + numpy.put( + self.scandata[name].omegacalc_per_grain, thisgrain.ind, omega_calc + ) - - - self.gv = numpy.ascontiguousarray(gv.T) + self.gv = numpy.ascontiguousarray(gv.T) return - def refine(self, ubi, quiet=True): """ Fit the matrix without changing the peak assignments """ - mat=ubi.copy() + mat = ubi.copy() # print "In refine",self.tolerance, self.gv.shape # First time fits the mat - self.npks, self.avg_drlv2 = cImageD11.score_and_refine(mat, self.gv, - self.tolerance) + self.npks, self.avg_drlv2 = cImageD11.score_and_refine( + mat, self.gv, self.tolerance + ) # apply symmetry to mat: if self.latticesymmetry is not triclinic: - cp = xfab.tools.ubi_to_cell( mat ) - U = xfab.tools.ubi_to_u( mat ) - mat = xfab.tools.u_to_ubi( U, self.latticesymmetry( cp ) ).copy() + cp = xfab.tools.ubi_to_cell(mat) + U = xfab.tools.ubi_to_u(mat) + mat = xfab.tools.u_to_ubi(U, self.latticesymmetry(cp)).copy() # Second time updates the score with the new mat - self.npks, self.avg_drlv2 = cImageD11.score_and_refine(mat, self.gv, - self.tolerance) + self.npks, self.avg_drlv2 = cImageD11.score_and_refine( + mat, self.gv, self.tolerance + ) # apply symmetry to mat: if self.latticesymmetry is not triclinic: - cp = xfab.tools.ubi_to_cell( mat ) - U = xfab.tools.ubi_to_u( mat ) - mat = xfab.tools.u_to_ubi( U, self.latticesymmetry( cp ) ) + cp = xfab.tools.ubi_to_cell(mat) + U = xfab.tools.ubi_to_u(mat) + mat = xfab.tools.u_to_ubi(U, self.latticesymmetry(cp)) if not quiet: import math + try: - print("%-8d %.6f"%(self.npks,math.sqrt(self.avg_drlv2))) + print("%-8d %.6f" % (self.npks, math.sqrt(self.avg_drlv2))) except: - print(self.npks,self.avg_drlv2, mat, self.gv.shape, self.tolerance) -# raise + print(self.npks, self.avg_drlv2, mat, self.gv.shape, self.tolerance) + # raise - #print self.tolerance -# self.npks, self.avg_drlv2 = cImageD11.score_and_refine(mat, self.gv, -# self.tolerance) - #tm = indexing.refine(ubi,self.gv,self.tolerance,quiet=quiet) - #print ubi, tm,ubi-tm,mat-tm + # print self.tolerance + # self.npks, self.avg_drlv2 = cImageD11.score_and_refine(mat, self.gv, + # 
self.tolerance) + # tm = indexing.refine(ubi,self.gv,self.tolerance,quiet=quiet) + # print ubi, tm,ubi-tm,mat-tm return mat - def applyargs(self,args): + def applyargs(self, args): self.parameterobj.set_variable_values(args) - def printresult(self,arg): + def printresult(self, arg): # print self.parameterobj.parameters # return for i in range(len(self.parameterobj.varylist)): item = self.parameterobj.varylist[i] value = arg[i] try: - self.parameterobj.parameters[item]=value - print(item,value) + self.parameterobj.parameters[item] = value + print(item, value) except: # Hopefully a crystal translation pass - - def gof(self,args): + def gof(self, args): """ for all of the grains in all of the scans """ self.applyargs(args) - diffs = 0. - contribs = 0. + diffs = 0.0 + contribs = 0.0 # defaulting to fitting all grains for key in self.grains_to_refine: g = self.grains[key] -### rotdex.fitagrain( gr, self.parameterobj ) + ### rotdex.fitagrain( gr, self.parameterobj ) grainname = key[0] - scanname = key[1] + # scanname = key[1] ??? # Compute gv using current parameters # Keep labels fixed if self.recompute_xlylzl: - g.peaks_xyz = transform.compute_xyz_lab([ g.sc, - g.fc ], - **self.parameterobj.parameters).T + g.peaks_xyz = transform.compute_xyz_lab( + [g.sc, g.fc], **self.parameterobj.parameters + ).T - self.compute_gv( g ) - #print self.gv.shape - #print self.gv[0:10,i:] + self.compute_gv(g) + # print self.gv.shape + # print self.gv[0:10,i:] # For stability, always start refining the read in one - g.set_ubi( self.refine( self.ubisread[grainname] ) ) - #print self.npks,self.avg_drlv2 # number of peaks it got - - #print self.gv.shape + g.set_ubi(self.refine(self.ubisread[grainname])) + # print self.npks,self.avg_drlv2 # number of peaks it got + # print self.gv.shape - - diffs += self.npks*self.avg_drlv2 - contribs+= self.npks + diffs += self.npks * self.avg_drlv2 + contribs += self.npks if contribs > 0: - return 1e6*diffs/contribs + return 1e6 * diffs / contribs else: print("No contribs???", self.grains_to_refine) return 1e6 - def estimate_steps(self, gof, guess, steps): assert len(guess) == len(steps) cen = self.gof(guess) deriv = [] print("Estimating step sizes") for i in range(len(steps)): - print(self.parameterobj.varylist[i], end=' ') + print(self.parameterobj.varylist[i], end=" ") newguess = [g for g in guess] newguess[i] = newguess[i] + steps[i] here = self.gof(newguess) - print("npks, avgdrlv" , self.npks, self.avg_drlv2, end=' ') + print("npks, avgdrlv", self.npks, self.avg_drlv2, end=" ") deriv.append(here - cen) - print("D_gof, d_par, dg/dp",here-cen, steps[i],here-cen/steps[i]) + print("D_gof, d_par, dg/dp", here - cen, steps[i], here - cen / steps[i]) # Make all match the one which has the biggest impact j = numpy.argmax(numpy.absolute(deriv)) print("Max step size is on", self.parameterobj.varylist[j]) - inc = [ s * abs(deriv[j]) / abs(d) for d, s in zip(deriv, steps) ] - print("steps",steps) - print("inc",inc) + inc = [s * abs(deriv[j]) / abs(d) for d, s in zip(deriv, steps)] + print("steps", steps) + print("inc", inc) return inc - - - - def fit(self, maxiters=100): """ Fit the global parameters @@ -597,19 +655,19 @@ def fit(self, maxiters=100): self.grains_to_refine = list(self.grains.keys()) self.recompute_xlylzl = False for n in names: - if n not in ['t_x', 't_y', 't_z']: + if n not in ["t_x", "t_y", "t_z"]: self.recompute_xlylzl = True inc = self.estimate_steps(self.gof, guess, inc) - s=simplex.Simplex(self.gof, guess, inc) - newguess,error,iter=s.minimize(maxiters=maxiters , 
monitor=1) + s = simplex.Simplex(self.gof, guess, inc) + newguess, error, iter = s.minimize(maxiters=maxiters, monitor=1) print() - print("names",names) - print("ng",newguess) - for p,v in zip(names,newguess): + print("names", names) + print("ng", newguess) + for p, v in zip(names, newguess): # record results - self.parameterobj.set(p,v) - print("Setting parameter",p,v) - trans =["t_x","t_y","t_z"] + self.parameterobj.set(p, v) + print("Setting parameter", p, v) + trans = ["t_x", "t_y", "t_z"] for t in trans: if t in names: i = trans.index(t) @@ -619,211 +677,205 @@ def fit(self, maxiters=100): # diffractometer parameters for g in self.getgrains(): self.grains[g].translation[i] = newguess[names.index(t)] - print(g, t,i,newguess[names.index(t)]) + print(g, t, i, newguess[names.index(t)]) print() self.printresult(newguess) def getgrains(self): return list(self.grains.keys()) - - def set_translation(self,gr,sc): - self.parameterobj.parameters['t_x'] = self.grains[(gr,sc)].translation[0] - self.parameterobj.parameters['t_y'] = self.grains[(gr,sc)].translation[1] - self.parameterobj.parameters['t_z'] = self.grains[(gr,sc)].translation[2] - + def set_translation(self, gr, sc): + self.parameterobj.parameters["t_x"] = self.grains[(gr, sc)].translation[0] + self.parameterobj.parameters["t_y"] = self.grains[(gr, sc)].translation[1] + self.parameterobj.parameters["t_z"] = self.grains[(gr, sc)].translation[2] def refinepositions(self, quiet=True, maxiters=100): self.assignlabels() - ks = list(self.grains.keys()) + ks = list(self.grains.keys()) ks.sort() # assignments are now fixed tolcache = self.tolerance self.tolerance = 1.0 for key in ks: - g = key[0] self.grains_to_refine = [key] - self.parameterobj.varylist = [ 't_x', 't_y', 't_z' ] - self.set_translation(key[0],key[1]) + self.parameterobj.varylist = ["t_x", "t_y", "t_z"] + self.set_translation(key[0], key[1]) guess = self.parameterobj.get_variable_values() - inc = self.parameterobj.get_variable_stepsizes() + inc = self.parameterobj.get_variable_stepsizes() - s = simplex.Simplex(self.gof, guess, inc) + s = simplex.Simplex(self.gof, guess, inc) - newguess, error, iter = s.minimize(maxiters=maxiters,monitor=1) + newguess, error, iter = s.minimize(maxiters=maxiters, monitor=1) - self.grains[key].translation[0] = self.parameterobj.parameters['t_x'] - self.grains[key].translation[1] = self.parameterobj.parameters['t_y'] - self.grains[key].translation[2] = self.parameterobj.parameters['t_z'] - print(key,self.grains[key].translation, end=' ') - self.refine(self.grains[key].ubi,quiet=False) + self.grains[key].translation[0] = self.parameterobj.parameters["t_x"] + self.grains[key].translation[1] = self.parameterobj.parameters["t_y"] + self.grains[key].translation[2] = self.parameterobj.parameters["t_z"] + print(key, self.grains[key].translation, end=" ") + self.refine(self.grains[key].ubi, quiet=False) self.tolerance = tolcache - def refineubis(self, quiet=True, scoreonly=False): - #print quiet + # print quiet ks = list(self.grains.keys()) ks.sort() if not quiet: - print("%10s %10s"%("grainname","scanname"), end=' ') + print("%10s %10s" % ("grainname", "scanname"), end=" ") print("npeak ") for key in ks: g = self.grains[key] grainname = key[0] scanname = key[1] if not quiet: - print("%10s %10s"%(grainname,scanname), end=' ') + print("%10s %10s" % (grainname, scanname), end=" ") # Compute gv using current parameters, including grain position - self.set_translation(key[0],key[1]) + self.set_translation(key[0], key[1]) self.compute_gv(g) - res = 
self.refine(g.ubi , quiet=quiet) + res = self.refine(g.ubi, quiet=quiet) if not scoreonly: - g.set_ubi( res ) - + g.set_ubi(res) def assignlabels(self, quiet=False): """ Fill out the appropriate labels for the spots """ if not quiet: - print("Assigning labels with XLYLZL") + print("Assigning labels with XLYLZL") import time + start = time.time() for s in self.scannames: - self.scandata[s].labels = self.scandata[s].labels*0 - 2 # == -1 - drlv2 = numpy.zeros(len(self.scandata[s].drlv2), float)+1 + self.scandata[s].labels = self.scandata[s].labels * 0 - 2 # == -1 + drlv2 = numpy.zeros(len(self.scandata[s].drlv2), float) + 1 nr = self.scandata[s].nrows sc = self.scandata[s].sc fc = self.scandata[s].fc om = self.scandata[s].omega # Looks like this in one dataset only - ng = len(self.grainnames) - int_tmp = numpy.zeros(nr , numpy.int32 )-1 + int_tmp = numpy.zeros(nr, numpy.int32) - 1 tmp = transform.compute_xyz_lab( - [ self.scandata[s].sc, - self.scandata[s].fc ], - **self.parameterobj.parameters) + [self.scandata[s].sc, self.scandata[s].fc], + **self.parameterobj.parameters + ) peaks_xyz = tmp.T.copy() if not quiet: - print("Start first grain loop",time.time()-start) + print("Start first grain loop", time.time() - start) start = time.time() - gv = numpy.zeros((nr,3), float ) - wedge = self.parameterobj.parameters['wedge'] - omegasign = self.parameterobj.parameters['omegasign'] - chi = self.parameterobj.parameters['chi'] - wvln = self.parameterobj.parameters['wavelength'] + gv = numpy.zeros((nr, 3), float) + wedge = self.parameterobj.parameters["wedge"] + omegasign = self.parameterobj.parameters["omegasign"] + chi = self.parameterobj.parameters["chi"] + wvln = self.parameterobj.parameters["wavelength"] first_loop = time.time() - drlv2 = (self.scandata[s].drlv2*0 + 1).astype(float) # == 1 - int_tmp = numpy.zeros(nr , numpy.int32 )-1 + drlv2 = (self.scandata[s].drlv2 * 0 + 1).astype(float) # == 1 + int_tmp = numpy.zeros(nr, numpy.int32) - 1 for ig, g in enumerate(self.grainnames): - gr = self.grains[ ( g, s) ] - self.set_translation( g, s) - cImageD11.compute_gv( peaks_xyz, + gr = self.grains[(g, s)] + self.set_translation(g, s) + cImageD11.compute_gv( + peaks_xyz, self.scandata[s].omega, omegasign, wvln, wedge, chi, gr.translation, - gv) - cImageD11.score_and_assign( gr.ubi, - gv, - self.tolerance, - drlv2, - int_tmp, - int(g)) + gv, + ) + cImageD11.score_and_assign( + gr.ubi, gv, self.tolerance, drlv2, int_tmp, int(g) + ) if not quiet: - print(time.time()-first_loop,"First loop") + print(time.time() - first_loop, "First loop") self.gv = gv.copy() # Second loop after checking all grains if not quiet: - print("End first grain loop",time.time()-start) + print("End first grain loop", time.time() - start) start = time.time() if not quiet: - print(self.scandata[s].labels.shape, \ - numpy.minimum.reduce(self.scandata[s].labels),\ - numpy.maximum.reduce(self.scandata[s].labels)) - - self.scandata[s].addcolumn( int_tmp , "labels" ) - self.scandata[s].addcolumn( drlv2 , "drlv2" ) + print( + self.scandata[s].labels.shape, + numpy.minimum.reduce(self.scandata[s].labels), + numpy.maximum.reduce(self.scandata[s].labels), + ) + + self.scandata[s].addcolumn(int_tmp, "labels") + self.scandata[s].addcolumn(drlv2, "drlv2") if not quiet: - print(self.scandata[s].labels.shape, \ - numpy.minimum.reduce(self.scandata[s].labels),\ - numpy.maximum.reduce(self.scandata[s].labels)) - - tth = numpy.zeros( nr, numpy.float32 )-1 - eta = numpy.zeros( nr, numpy.float32 ) - - self.scandata[s].addcolumn( tth, "tth_per_grain" ) 
- self.scandata[s].addcolumn( eta, "eta_per_grain" ) - self.scandata[s].addcolumn( om*0, "omegacalc_per_grain" ) - self.scandata[s].addcolumn( self.gv[:,0], "gx") - self.scandata[s].addcolumn( self.gv[:,1], "gy") - self.scandata[s].addcolumn( self.gv[:,2], "gz") - self.scandata[s].addcolumn( numpy.zeros(nr, numpy.float32), "hr") - self.scandata[s].addcolumn( numpy.zeros(nr, numpy.float32), "kr") - self.scandata[s].addcolumn( numpy.zeros(nr, numpy.float32), "lr") - self.scandata[s].addcolumn( numpy.zeros(nr, numpy.float32), "h") - self.scandata[s].addcolumn( numpy.zeros(nr, numpy.float32), "k") - self.scandata[s].addcolumn( numpy.zeros(nr, numpy.float32), "l") - + print( + self.scandata[s].labels.shape, + numpy.minimum.reduce(self.scandata[s].labels), + numpy.maximum.reduce(self.scandata[s].labels), + ) + + tth = numpy.zeros(nr, numpy.float32) - 1 + eta = numpy.zeros(nr, numpy.float32) + + self.scandata[s].addcolumn(tth, "tth_per_grain") + self.scandata[s].addcolumn(eta, "eta_per_grain") + self.scandata[s].addcolumn(om * 0, "omegacalc_per_grain") + self.scandata[s].addcolumn(self.gv[:, 0], "gx") + self.scandata[s].addcolumn(self.gv[:, 1], "gy") + self.scandata[s].addcolumn(self.gv[:, 2], "gz") + self.scandata[s].addcolumn(numpy.zeros(nr, numpy.float32), "hr") + self.scandata[s].addcolumn(numpy.zeros(nr, numpy.float32), "kr") + self.scandata[s].addcolumn(numpy.zeros(nr, numpy.float32), "lr") + self.scandata[s].addcolumn(numpy.zeros(nr, numpy.float32), "h") + self.scandata[s].addcolumn(numpy.zeros(nr, numpy.float32), "k") + self.scandata[s].addcolumn(numpy.zeros(nr, numpy.float32), "l") if not quiet: - print("Start second grain loop",time.time()-start) + print("Start second grain loop", time.time() - start) start = time.time() # We have the labels set in self.scandata!!! 
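With labels, drlv2, the g-vector columns gx/gy/gz and the per-grain angle columns in place, a downstream script can pull one grain's peaks straight out of the column file. A small usage sketch, with hypothetical file names:

    import numpy as np
    from ImageD11.columnfile import columnfile
    from ImageD11.grain import read_grain_file

    c = columnfile("peaks.flt")            # hypothetical filenames
    grains = read_grain_file("grains.ubi")
    for i, g in enumerate(grains):
        m = c.labels == i                  # peaks assigned to grain i
        gvecs = np.vstack((c.gx[m], c.gy[m], c.gz[m]))  # (3, npeaks)
        hkl = np.round(g.ubi.dot(gvecs))   # nearest integer Miller indices
        print(i, int(m.sum()), "peaks")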
for g in self.grainnames: - gr = self.grains[ ( g, s) ] - - ind = numpy.compress(int_tmp == g, - numpy.arange(nr) ) - #print 'x',gr.x[:10] - #print 'ind',ind[:10] - gr.ind = ind # use this to push back h,k,l later - gr.peaks_xyz = numpy.take( peaks_xyz, ind, axis=0 ) - gr.sc = numpy.take( sc, ind) - gr.fc = numpy.take( fc, ind) - gr.om = numpy.take(self.scandata[s].omega , ind) + gr = self.grains[(g, s)] + + ind = numpy.compress(int_tmp == g, numpy.arange(nr)) + # print 'x',gr.x[:10] + # print 'ind',ind[:10] + gr.ind = ind # use this to push back h,k,l later + gr.peaks_xyz = numpy.take(peaks_xyz, ind, axis=0) + gr.sc = numpy.take(sc, ind) + gr.fc = numpy.take(fc, ind) + gr.om = numpy.take(self.scandata[s].omega, ind) gr.omega_calc = gr.om.copy() gr.npks = len(gr.ind) - self.set_translation( g, s) + self.set_translation(g, s) try: - sign = self.parameterobj.parameters['omegasign'] + sign = self.parameterobj.parameters["omegasign"] except: sign = 1.0 - tth, eta = transform.compute_tth_eta_from_xyz( gr.peaks_xyz.T, - omega = gr.om * sign, - **self.parameterobj.parameters) + tth, eta = transform.compute_tth_eta_from_xyz( + gr.peaks_xyz.T, omega=gr.om * sign, **self.parameterobj.parameters + ) self.scandata[s].tth_per_grain[ind] = tth self.scandata[s].eta_per_grain[ind] = eta -# self.scandata[s].omegacalc_per_grain[ind] = gr.omega_calc - self.grains[ ( g, s) ] = gr + # self.scandata[s].omegacalc_per_grain[ind] = gr.omega_calc + self.grains[(g, s)] = gr if not quiet: - print("Grain",g,"Scan",s,"npks=",len(ind)) - #print 'x',gr.x[:10] + print("Grain", g, "Scan", s, "npks=", len(ind)) + # print 'x',gr.x[:10] # Compute the total integrated intensity if we have enough # information available - compute_lp_factor( self.scandata[s] ) + compute_lp_factor(self.scandata[s]) for g in self.grainnames: - gr = self.grains[ (g, s) ] - gr.intensity_info = compute_total_intensity( self.scandata[s] , - gr.ind, - self.intensity_tth_range, - quiet = quiet ) + gr = self.grains[(g, s)] + gr.intensity_info = compute_total_intensity( + self.scandata[s], gr.ind, self.intensity_tth_range, quiet=quiet + ) if not quiet: - print("End second grain loop",time.time()-start) - print() + print("End second grain loop", time.time() - start) + print() start = time.time() - -def compute_lp_factor( colfile, **kwds ): +def compute_lp_factor(colfile, **kwds): """ We'll assume for simplicity that wedge, chi, tilts are all zero Of course we should put that in sometime in the near future @@ -854,7 +906,7 @@ def compute_lp_factor( colfile, **kwds ): From this we will take the (1+cos^2(2t))cos(2t)/sqrt(cos^2p-sin^2t)/sin2t """ if "tth" in colfile.titles and "eta" in colfile.titles: - lp = lf( colfile.tth, colfile.eta ) + lp = lf(colfile.tth, colfile.eta) assert len(lp) == len(colfile.tth) try: colfile.addcolumn(lp, "Lorentz") @@ -862,51 +914,52 @@ def compute_lp_factor( colfile, **kwds ): print(lp.shape, colfile.tth.shape, colfile.nrows, "?") raise - if "tth_per_grain" in colfile.titles and \ - "eta_per_grain" in colfile.titles: - lpg = lf( colfile.tth_per_grain, colfile.eta_per_grain ) + if "tth_per_grain" in colfile.titles and "eta_per_grain" in colfile.titles: + lpg = lf(colfile.tth_per_grain, colfile.eta_per_grain) assert len(lpg) == len(colfile.tth_per_grain) colfile.addcolumn(lpg, "Lorentz_per_grain") -def lf( tth, eta ): +def lf(tth, eta): """ - INPUT: 2*theta and eta in degrees. Can be floats or numpy arrays. + INPUT: 2*theta and eta in degrees. Can be floats or numpy arrays. - OUTPUT: Lorentz scaling factor for intensity. 
Same data type as input.
 
-    EXPLANATION:
-    Compute the Lorentz Factor defined for 3DXRD as
+    EXPLANATION:
+    Compute the Lorentz Factor defined for 3DXRD as
 
-        L( theta,eta ) = sin( 2*theta )*|sin( eta )|
+        L( theta,eta ) = sin( 2*theta )*|sin( eta )|
 
-    This is verified by:
+    This is verified by:
 
-    * Kabsch, W. (1988). Evaluation of single-crystal X-ray di raction data
-      from a position-sensitive detector
-    * Poulsen, H. F. (2004). 3DXRD { a new probe for materials science.
-      Roskilde: Riso National Laboratory
+    * Kabsch, W. (1988). Evaluation of single-crystal X-ray diffraction data
+      from a position-sensitive detector
+    * Poulsen, H. F. (2004). 3DXRD - a new probe for materials science.
+      Roskilde: Riso National Laboratory
 
-    and can be derived with support of
-    * Als-Nielsen, J. and McMorrow, D. (2017). Elements of Modern X-ray Physics
+    and can be derived with support of
+    * Als-Nielsen, J. and McMorrow, D. (2017). Elements of Modern X-ray Physics
 
-    MODIFIED: 7 Feb 2019 by Axel Henningsson.
+    MODIFIED: 7 Feb 2019 by Axel Henningsson.
     """
-    sin_tth = numpy.sin( numpy.radians(tth) )
-    sin_eta = numpy.sin( numpy.radians(eta) )
-    return sin_tth*abs( sin_eta )
+    sin_tth = numpy.sin(numpy.radians(tth))
+    sin_eta = numpy.sin(numpy.radians(eta))
+    return sin_tth * abs(sin_eta)
 
 
-def compute_total_intensity( colfile, indices, tth_range, ntrim = 2, quiet = False ):
+def compute_total_intensity(colfile, indices, tth_range, ntrim=2, quiet=False):
     """
     Add up the intensity in colfile for peaks given in indices
     """
     if "sum_intensity" in colfile.titles:
         raw_intensities = colfile.sum_intensity
     else:
-        if ("Number_of_pixels" in colfile.titles) and \
-           ("avg_intensity" in colfile.titles):
+        if ("Number_of_pixels" in colfile.titles) and (
+            "avg_intensity" in colfile.titles
+        ):
             raw_intensities = colfile.Number_of_pixels * colfile.avg_intensity
         else:
             try:
@@ -918,92 +971,102 @@ def compute_total_intensity( colfile, indices, tth_range, ntrim = 2, quiet = Fal
     if "Lorentz_per_grain" in colfile.titles:
         lor = colfile.Lorentz_per_grain
         if not quiet:
-            print("lorentz per grain for ints", end=' ')
+            print("lorentz per grain for ints", end=" ")
     elif "Lorentz" in colfile.titles:
         lor = colfile.Lorentz
         if not quiet:
-            print("lorentz for ints", end=' ')
+            print("lorentz for ints", end=" ")
     else:
         if not quiet:
-            print("lost the lorentz", end=' ')
-        lor = numpy.ones( colfile.nrows, numpy.float32 )
+            print("lost the lorentz", end=" ")
+        lor = numpy.ones(colfile.nrows, numpy.float32)
     # risk divide by zero here...
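The factor computed by lf goes to zero both at low scattering angle and for peaks near eta = 0 or 180 degrees, close to the rotation axis. A standalone recap with made-up numbers, only to show the size of the correction applied to the raw intensities below:

    import numpy as np

    def lf(tth, eta):
        # L(theta, eta) = sin(2*theta) * |sin(eta)|; tth is already 2*theta, degrees
        return np.sin(np.radians(tth)) * np.abs(np.sin(np.radians(eta)))

    tth = np.array([5.0, 10.0, 20.0])   # 2-theta of three peaks
    eta = np.array([90.0, 45.0, 10.0])  # azimuthal angles
    print(lf(tth, eta))                 # approx [0.087, 0.123, 0.059]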
- intensities = numpy.take( raw_intensities * lor , indices ) - #print "Compute intensity",intensities,lor[indices] + intensities = numpy.take(raw_intensities * lor, indices) + # print "Compute intensity",intensities,lor[indices] sigma_i = numpy.sum(intensities) if tth_range is not None: if "tth_per_grain" in colfile.titles: tth = colfile.tth_per_grain if not quiet: - print("tth_per_grain for", end=' ') + print("tth_per_grain for", end=" ") elif "tth" in colfile.titles: tth = colfile.tth if not quiet: - print("tth for range", end=' ') + print("tth for range", end=" ") else: # bugger tth = numpy.ones(colfile.nrows) - tth_vals = numpy.take(tth, indices ) + tth_vals = numpy.take(tth, indices) - intensities = numpy.compress( ( tth_vals > min(tth_range) )& - ( tth_vals < max(tth_range) ) , - intensities ) + intensities = numpy.compress( + (tth_vals > min(tth_range)) & (tth_vals < max(tth_range)), intensities + ) if not quiet: - print("range %.5f %.5f"%tuple(tth_range), end=' ') - + print("range %.5f %.5f" % tuple(tth_range), end=" ") if len(intensities) < 1: return "no peaks" # min, max, med, mean, stddev, n intensities.sort() - if(len(intensities)) > ntrim*2+1: + if (len(intensities)) > ntrim * 2 + 1: intensities = intensities[ntrim:-ntrim] try: - ret = "sum_of_all = %f , middle %d from %f to %f in tth: median = %f , min = %f , max = %f , mean = %f , std = %f , n = %d"%( - sigma_i, - len(intensities), - min(tth_range), - max(tth_range), - intensities[ len(intensities)//2 ], - intensities.min(), - intensities.max(), - intensities.mean(), - intensities.std(), - intensities.shape[0]) + ret = ( + "sum_of_all = %f , middle %d from %f to %f in tth: median = %f , min = %f , max = %f , mean = %f , std = %f , n = %d" + % ( + sigma_i, + len(intensities), + min(tth_range), + max(tth_range), + intensities[len(intensities) // 2], + intensities.min(), + intensities.max(), + intensities.mean(), + intensities.std(), + intensities.shape[0], + ) + ) except: if not quiet: - print(intensities) + print(intensities) raise if not quiet: - print(ret) + print(ret) return ret - - def test_benoit(): - o=refinegrains() + o = refinegrains() o.loadparameters("start.prm") - scans = ["0N.flt" ,"811N.flt" , "2211N.flt" , "3311N.flt" , "4811N.flt" ] + scans = ["0N.flt", "811N.flt", "2211N.flt", "3311N.flt", "4811N.flt"] for name in scans: o.loadfiltered(name) o.readubis("ubitest") o.generate_grains() o.tolerance = 0.1 - #o.varytranslations() - for o.tolerance in [0.1,0.2,0.15,0.1,0.075,0.05]: + # o.varytranslations() + for o.tolerance in [0.1, 0.2, 0.15, 0.1, 0.075, 0.05]: o.refineubis(quiet=False) print("***") o.refineubis(quiet=False) - o.varylist = ['y-center','z-center','distance','tilt-y','tilt-z','wedge','chi'] + o.varylist = [ + "y-center", + "z-center", + "distance", + "tilt-y", + "tilt-z", + "wedge", + "chi", + ] o.fit() o.refineubis(quiet=False) def test_nac(): import sys + o = refinegrains() o.loadparameters(sys.argv[1]) print("got pars") @@ -1015,13 +1078,11 @@ def test_nac(): print("generating") o.generate_grains() print("Refining posi too") - o.refineubis(quiet = False , scoreonly = True) + o.refineubis(quiet=False, scoreonly=True) print("Refining positions too") o.refinepositions() print("Refining positions too") - o.refineubis(quiet = False , scoreonly = True) - - + o.refineubis(quiet=False, scoreonly=True) if __name__ == "__main__": diff --git a/ImageD11/rotdex.py b/ImageD11/rotdex.py index eb7b2f82..dd44d95f 100644 --- a/ImageD11/rotdex.py +++ b/ImageD11/rotdex.py @@ -1,10 +1,8 @@ - from __future__ import 
print_function
 from ImageD11.columnfile import columnfile
 from ImageD11.parameters import read_par_file
-from ImageD11.unitcell import unitcell_from_parameters
-from ImageD11.grain import read_grain_file, write_grain_file
+from ImageD11.grain import read_grain_file, write_grain_file
 from ImageD11 import transform
 import numpy as np
@@ -22,21 +20,16 @@ def getCxyz(colf, pars):
     pars = parameters for ImageD11 transform module
     """
     wedge = pars.get("wedge")
-    chi = pars.get("chi")
-    peaks_xyz = transform.compute_xyz_lab([ colf.sc,
-                                            colf.fc ],
-                                          **pars.parameters)
+    chi = pars.get("chi")
+    peaks_xyz = transform.compute_xyz_lab([colf.sc, colf.fc], **pars.parameters)
     fun = transform.compute_g_from_k
-    peaks_Cxyz = fun( peaks_xyz, colf.omega,
-                      wedge=wedge,
-                      chi=chi)
+    peaks_Cxyz = fun(peaks_xyz, colf.omega, wedge=wedge, chi=chi)
     b = np.zeros(peaks_Cxyz.shape)
-    b[0,:] = 1.0/pars.get("wavelength")
-    beam_Cxyz = fun( b, colf.omega,
-                     wedge=wedge,
-                     chi=chi)
+    b[0, :] = 1.0 / pars.get("wavelength")
+    beam_Cxyz = fun(b, colf.omega, wedge=wedge, chi=chi)
     return peaks_Cxyz, beam_Cxyz
 
+
 def compute_Cgve(txyz, peaks_Cxyz, beam_Cxyz, wavelength):
     """
     Computes g-vectors from spots and beam in crystal frame
@@ -50,13 +43,14 @@ def compute_Cgve(txyz, peaks_Cxyz, beam_Cxyz, wavelength):
     # Output ray - full length
     r_f = (peaks_Cxyz.T - txyz).T
     # Normalise to unit vector and scale for wavelength
-    fac = wavelength * np.sqrt( (r_f*r_f).sum(axis=0) )
-    np.divide( r_f, fac, r_f )
+    fac = wavelength * np.sqrt((r_f * r_f).sum(axis=0))
+    np.divide(r_f, fac, r_f)
     # Compute scattering vector
-    np.subtract( r_f, beam_Cxyz, r_f )
+    np.subtract(r_f, beam_Cxyz, r_f)
     return r_f
 
-def compute_dgdt( txyz, peaks_Cxyz, beam_Cxyz, wavelength):
+
+def compute_dgdt(txyz, peaks_Cxyz, beam_Cxyz, wavelength):
     """
     Compute the gvectors and the derivatives with respect to tx,ty,tz
     txyz = (3,) origin position
@@ -66,32 +60,32 @@ def compute_dgdt( txyz, peaks_Cxyz, beam_Cxyz, wavelength):
     wavelength = float radiation, normalises length
     returns scattering vectors (g-vectors) and d(g)/d(txyz)
     """
-    r = (peaks_Cxyz.T - txyz).T # 3xn
+    r = (peaks_Cxyz.T - txyz).T  # 3xn
     # Normalise to unit vector and scale for wavelength
-    f = wavelength * np.sqrt( (r*r).sum(axis=0) )
+    f = wavelength * np.sqrt((r * r).sum(axis=0))
     # d|r| -> r/|r| and f/wvln = |r|
-    dfdT = - wavelength * wavelength * r / f # 3xn
+    dfdT = -wavelength * wavelength * r / f  # 3xn
     # over-writes in place
-    np.divide( r, f, r )
+    np.divide(r, f, r)
     # quotient rule d(r/f) : (r'.f - r.f')/f.f
-    drdT = np.zeros((3,3,len(f)))
+    drdT = np.zeros((3, 3, len(f)))
     # d( r/f )/dT -> r is a vector, r[0], r[1], r[2]
     #                f is a scalar
     # o = np.ones(len(f))
     # please derive/explain this...
note r is already r/f - drdT[0] = ( (dfdT[0] * r).T + (1,0,0) ).T / f - drdT[1] = ( (dfdT[1] * r).T + (0,1,0) ).T / f - drdT[2] = ( (dfdT[2] * r).T + (0,0,1) ).T / f - np.subtract( r, beam_Cxyz, r ) + drdT[0] = ((dfdT[0] * r).T + (1, 0, 0)).T / f + drdT[1] = ((dfdT[1] * r).T + (0, 1, 0)).T / f + drdT[2] = ((dfdT[2] * r).T + (0, 0, 1)).T / f + np.subtract(r, beam_Cxyz, r) return r, drdT -def fit_ub_t( ub, translation, hkl, peaks_Cxyz, beam_Cxyz, wavelength): +def fit_ub_t(ub, translation, hkl, peaks_Cxyz, beam_Cxyz, wavelength): """ Fits the ub and grain origin to a list of assigned peaks All unit cell and orientations parameters are free Runs 2 cycles (empirically this converges) - + ub = (3,3) input UB matrix (so that h ~= UB.g) translation = (3,) grain origin hkl = (3,n) integer peak assignments @@ -101,10 +95,10 @@ def fit_ub_t( ub, translation, hkl, peaks_Cxyz, beam_Cxyz, wavelength): returns fitted UB and translation """ npk = len(hkl[0]) - dg = np.zeros( (12,3,npk) ) + dg = np.zeros((12, 3, npk)) for i in range(3): for j in range(3): - dg[i*3+j,i] = hkl[j] # ub in 0->8 + dg[i * 3 + j, i] = hkl[j] # ub in 0->8 # Why? : # d(gcalc3)/d(ub9) = h # dgdub = np.zeros((3,3,3,c.nrows)) @@ -131,91 +125,86 @@ def fit_ub_t( ub, translation, hkl, peaks_Cxyz, beam_Cxyz, wavelength): # empirically it converges to 3 decimal places in 1 cycle # ...since: dgobsdt seems to depend on t we run a couple of cycles for _ in range(2): - gobs, dgobsdt = compute_dgdt( tnew, peaks_Cxyz, beam_Cxyz, wavelength ) + gobs, dgobsdt = compute_dgdt(tnew, peaks_Cxyz, beam_Cxyz, wavelength) # Note dgdub=h does not change here for i in range(3): - dg[i+9] = dgobsdt[i] # translation at 9,10,11 - gcalc = np.dot( ubnew , hkl ) # 3xn + dg[i + 9] = dgobsdt[i] # translation at 9,10,11 + gcalc = np.dot(ubnew, hkl) # 3xn gdiff = gcalc - gobs # print((gdiff*gdiff).ravel().sum(),tnew) - dg.shape = 12,3*npk - mat = np.dot( dg, dg.T ) - rhs = np.dot( dg, gdiff.ravel() ) - imat = np.linalg.inv( mat ) - shifts = np.dot (imat, rhs ) - dg.shape = (12,3,npk) - ubnew = ubnew - np.reshape(shifts[:9],(3,3)) - tnew = tnew - shifts[9:] + dg.shape = 12, 3 * npk + mat = np.dot(dg, dg.T) + rhs = np.dot(dg, gdiff.ravel()) + imat = np.linalg.inv(mat) + shifts = np.dot(imat, rhs) + dg.shape = (12, 3, npk) + ubnew = ubnew - np.reshape(shifts[:9], (3, 3)) + tnew = tnew - shifts[9:] return ubnew, tnew -def fitagrain( gr, pars ): - """ - """ + +def fitagrain(gr, pars): + """ """ t = gr.translation.copy() ub = gr.ub.copy() - pks, beam = getCxyz( gr, pars ) + pks, beam = getCxyz(gr, pars) # This is a little ugly. Can the wavelength live in the B matrix? 
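fit_ub_t above stacks the nine UB derivatives (d gcalc / d UB_ij is just the Miller index h_j) and the three origin derivatives into a (12, 3*npk) design matrix, then solves the normal equations for the parameter shifts. The same solve, schematically, using numpy.linalg.solve instead of an explicit inverse, with random placeholders standing in for the derivatives and residuals:

    import numpy as np

    npk = 50                              # number of assigned peaks
    dg = np.random.random((12, 3, npk))   # d(gcalc - gobs) / d(parameter)
    gdiff = np.random.random((3, npk))    # current residuals
    A = dg.reshape(12, 3 * npk)
    shifts = np.linalg.solve(A.dot(A.T), A.dot(gdiff.ravel()))
    ub_shift = shifts[:9].reshape(3, 3)   # first nine entries update UB
    t_shift = shifts[9:]                  # last three move the grain origin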
- hi = np.round( np.dot( gr.ubi, compute_Cgve( t, pks, beam, pars.get("wavelength") ) ) ) - ubnew, tnew = fit_ub_t( ub, t, hi, pks, beam, pars.get("wavelength")) + hi = np.round(np.dot(gr.ubi, compute_Cgve(t, pks, beam, pars.get("wavelength")))) + ubnew, tnew = fit_ub_t(ub, t, hi, pks, beam, pars.get("wavelength")) return ubnew, tnew def main(): import sys, time + c = columnfile(sys.argv[1]) p = read_par_file(sys.argv[2]) - u = unitcell_from_parameters(p) + # u = unitcell_from_parameters(p) gl = read_grain_file(sys.argv[3]) if gl[0].translation is None: - gl[0].translation = np.array((0.,0.,0.)) + gl[0].translation = np.array((0.0, 0.0, 0.0)) start = time.time() # Setup and assign hkls w = p.get("wavelength") - peaks_Cxyz, beam_Cxyz = getCxyz( c, p ) + peaks_Cxyz, beam_Cxyz = getCxyz(c, p) t = gl[0].translation.copy() ub = gl[0].ub.copy() ubi = gl[0].ubi.copy() gve = compute_Cgve(t, peaks_Cxyz, beam_Cxyz, w) - hi = np.round( np.dot( ubi, gve ) ) - lastgof = 1e9 - ubn, tn = fit_ub_t( ub, t, hi, peaks_Cxyz, beam_Cxyz, w) - print("Before\nt=",gl[0].translation) - print("UB=",gl[0].ub) - gl[0].set_ubi( np.linalg.inv( ubn ) ) - gl[0].translation = tn + hi = np.round(np.dot(ubi, gve)) + # lastgof = 1e9 + ubn, tn = fit_ub_t(ub, t, hi, peaks_Cxyz, beam_Cxyz, w) + print("Before\nt=", gl[0].translation) + print("UB=", gl[0].ub) + gl[0].set_ubi(np.linalg.inv(ubn)) + gl[0].translation = tn dt = time.time() - start - print("time calculating",dt,"gps",1/dt) - print("After\nt=",gl[0].translation) - print("UB=",gl[0].ub) - write_grain_file(sys.argv[4],gl) + print("time calculating", dt, "gps", 1 / dt) + print("After\nt=", gl[0].translation) + print("UB=", gl[0].ub) + write_grain_file(sys.argv[4], gl) def main2(): import sys + c = columnfile(sys.argv[1]) p = read_par_file(sys.argv[2]) gl = read_grain_file(sys.argv[3]) - for i,g in enumerate(gl): + for i, g in enumerate(gl): mask = c.labels == i - g.sc = np.compress( mask, c.sc ) - g.fc = np.compress( mask, c.fc ) - g.omega = np.compress( mask, c.omega ) - ubnew, tnew = fitagrain( g, p ) - g.set_ubi( np.linalg.inv( ubnew ) ) + g.sc = np.compress(mask, c.sc) + g.fc = np.compress(mask, c.fc) + g.omega = np.compress(mask, c.omega) + ubnew, tnew = fitagrain(g, p) + g.set_ubi(np.linalg.inv(ubnew)) g.translation[:] = tnew print(i, len(g.sc), tnew) - write_grain_file( sys.argv[4], gl ) - - + write_grain_file(sys.argv[4], gl) - -if __name__=="__main__": +if __name__ == "__main__": main2() - - - - # python rotdex.py g10.flt cubic.par g10.ubi g10.t1 diff --git a/ImageD11/rsv.py b/ImageD11/rsv.py index b86dc7db..4458aa28 100644 --- a/ImageD11/rsv.py +++ b/ImageD11/rsv.py @@ -1,4 +1,3 @@ - from __future__ import print_function, division """ @@ -29,11 +28,13 @@ import logging, numpy, h5py + class rsv(object): """ A reciprocal space volume """ - def __init__(self, dimensions, bounds, np, **kwds ): + + def __init__(self, dimensions, bounds, np, **kwds): """ dimensions = NX*NY*NZ grid for the space uspace = a 3x3 matrix describing the grid @@ -41,228 +42,233 @@ def __init__(self, dimensions, bounds, np, **kwds ): pixel at vol[i,j,k] comes from: [i,j,k] = (uspace).gvec gvec is the scattering vector in reciprocal space - so uspace are vectors in real space + so uspace are vectors in real space uorigin = a 3 vector giving the position of the [0,0,0] pixel """ assert len(dimensions) == 3 - self.SIG = None # signal - self.MON = None # monitor - self.NR = [int(x) for x in dimensions] # dimensions + self.SIG = None # signal + self.MON = None # monitor + self.NR = [int(x) for x in 
dimensions] # dimensions self.NORMED = None self.bounds = bounds # boundary in reciprocal space - self.np = np # px per hkl + self.np = np # px per hkl self.metadata = kwds # Do not allocate in constructor for now - make caller care about # memory usage # self.allocate_vol() - def allocate_vol( self ): - """ + def allocate_vol(self): + """ Allocates memory for a volume data """ if self.NR is None: raise Exception("Cannot allocate rsv") - total = int(self.NR[0]*self.NR[1]*self.NR[2]) - print("rsv: memory used = %.2f MB"%(total*8.0/1024/1024)) - print("dim: %d %d %d"%(self.NR[0],self.NR[1],self.NR[2])) - self.SIG = numpy.zeros( total, numpy.float32 ) - self.MON = numpy.zeros( total, numpy.float32 ) - - - def normalise( self , savespace = True): + total = int(self.NR[0] * self.NR[1] * self.NR[2]) + print("rsv: memory used = %.2f MB" % (total * 8.0 / 1024 / 1024)) + print("dim: %d %d %d" % (self.NR[0], self.NR[1], self.NR[2])) + self.SIG = numpy.zeros(total, numpy.float32) + self.MON = numpy.zeros(total, numpy.float32) + + def normalise(self, savespace=True): """ Return the normalised but avoid divide by zero """ if savespace: self.NORMED = self.SIG else: - self.NORMED = numpy.zeros( self.SIG.shape, numpy.float32 ) + self.NORMED = numpy.zeros(self.SIG.shape, numpy.float32) import sys + print(self.NORMED.shape[0]) - for i in range( self.NORMED.shape[0] ): - print(i,"\r", end=' ') + for i in range(self.NORMED.shape[0]): + print(i, "\r", end=" ") sys.stdout.flush() msk = (self.MON[i] < 0.1).astype(numpy.uint8) - numpy.add( self.MON[i], msk, self.MON[i]) - numpy.divide( self.SIG[i], - self.MON[i], # divide by mon + 1 - self.NORMED[i] ) - numpy.subtract( self.MON[i], msk, self.MON[i]) - numpy.subtract( 1, msk, msk ) - numpy.multiply( self.NORMED[i], msk, self.NORMED[i] ) + numpy.add(self.MON[i], msk, self.MON[i]) + numpy.divide(self.SIG[i], self.MON[i], self.NORMED[i]) # divide by mon + 1 + numpy.subtract(self.MON[i], msk, self.MON[i]) + numpy.subtract(1, msk, msk) + numpy.multiply(self.NORMED[i], msk, self.NORMED[i]) print(i) - plnames = { - 0 :0, "h":0, "H":0, - 1 :1, "k":1, "K":1, - 2 :2, "l":2, "L":2, - } - + 0: 0, + "h": 0, + "H": 0, + 1: 1, + "k": 1, + "K": 1, + 2: 2, + "l": 2, + "L": 2, + } def slice(self, plane, num): """ - return signal on plane index num + return signal on plane index num """ if plane not in self.plnames: - raise Exception("Plane should be one of %s"%( - str(self.plnames))) + raise Exception("Plane should be one of %s" % (str(self.plnames))) p = self.plnames[plane] # floor(x+0.5) is nearest integer - ind = int(numpy.floor( num * self.np + 0.5) - self.bounds[p][0]) + ind = int(numpy.floor(num * self.np + 0.5) - self.bounds[p][0]) # convert this back to num - testnum = 1.0*(self.bounds[p][0] + ind )/self.np - if abs(testnum - num)>1e-6: - logging.info("Nearest plane to %f is %f"%(num, testnum)) + testnum = 1.0 * (self.bounds[p][0] + ind) / self.np + if abs(testnum - num) > 1e-6: + logging.info("Nearest plane to %f is %f" % (num, testnum)) if ind < 0 or ind >= self.NR[p]: - print(ind,num,self.np,self.bounds) + print(ind, num, self.np, self.bounds) raise Exception("slice is out of volume bounds") if self.NORMED is None: self.normalise() if len(self.NORMED.shape) == 1: self.NORMED.reshape(self.NR) - if p==0: + if p == 0: return self.NORMED[ind, :, :] - if p==1: + if p == 1: return self.NORMED[:, ind, :] - if p==2: + if p == 2: return self.NORMED[:, :, ind] -def getbounds( vol, plane ): +def getbounds(vol, plane): """ Returns the extent argument to use for pylab.imshow when 
plotting a plane
     """
-    inds = [0,1,2]
-    inds.remove( vol.plnames[plane] )
-    imin = vol.bounds[inds[0]][0]*1.0/vol.np
-    imax = vol.bounds[inds[0]][1]*1.0/vol.np
-    jmin = vol.bounds[inds[1]][0]*1.0/vol.np
-    jmax = vol.bounds[inds[1]][1]*1.0/vol.np
+    inds = [0, 1, 2]
+    inds.remove(vol.plnames[plane])
+    imin = vol.bounds[inds[0]][0] * 1.0 / vol.np
+    imax = vol.bounds[inds[0]][1] * 1.0 / vol.np
+    jmin = vol.bounds[inds[1]][0] * 1.0 / vol.np
+    jmax = vol.bounds[inds[1]][1] * 1.0 / vol.np
     # left, right, top, bottom
-    return jmin,jmax,imax,imin
+    return jmin, jmax, imax, imin
 
 
 def writevol(vol, filename):
     """
     Write volume in vol to filename
-    
+
     Compress -1 is for the zeros, of which there may be a lot
     """
-    if not isinstance( vol, rsv ):
+    if not isinstance(vol, rsv):
         raise Exception("First arg to writevol should be an rsv object")
     for a in [vol.NR, vol.SIG, vol.MON]:
         if a is None:
             raise Exception("Cannot save rsv, has no data in it")
-    volout = h5py.File( filename,"w")
+    volout = h5py.File(filename, "w")
     if vol.SIG.dtype != numpy.float32:
         logging.warning("rsv SIG was not float32, converting")
         vol.SIG = vol.SIG.astype(numpy.float32)
-    volout.create_dataset( "signal",
-                           (vol.NR[0],vol.NR[1],vol.NR[2]),
-                           vol.SIG.dtype,
-                           data = vol.SIG,
-                           compression = 'gzip',
-                           compression_opts = 1)
+    volout.create_dataset(
+        "signal",
+        (vol.NR[0], vol.NR[1], vol.NR[2]),
+        vol.SIG.dtype,
+        data=vol.SIG,
+        compression="gzip",
+        compression_opts=1,
+    )
     if vol.MON.dtype != numpy.float32:
         logging.warning("rsv MON was not float32, converting")
         vol.MON = vol.MON.astype(numpy.float32)
-    volout.create_dataset( "monitor",
-                           (vol.NR[0],vol.NR[1],vol.NR[2]),
-                           vol.MON.dtype,
-                           data = vol.MON,
-                           compression = 'gzip',
-                           compression_opts = 1)
-    volout.attrs['bounds'] = vol.bounds
-    volout.attrs['np'] = vol.np
+    volout.create_dataset(
+        "monitor",
+        (vol.NR[0], vol.NR[1], vol.NR[2]),
+        vol.MON.dtype,
+        data=vol.MON,
+        compression="gzip",
+        compression_opts=1,
+    )
+    volout.attrs["bounds"] = vol.bounds
+    volout.attrs["np"] = vol.np
     for key, value in vol.metadata.items():
-        volout.attrs[key]=value
+        volout.attrs[key] = value
     volout.flush()
     volout.close()
-    
+
 
 def writenormedvol(vol, filename):
     """
     Write volume in vol to filename
     - save only the normalised to avoid using so much memory
-    
+
     Compress -1 is for the zeros, of which there may be a lot
     """
-    if not isinstance( vol, rsv ):
+    if not isinstance(vol, rsv):
         raise Exception("First arg to writevol should be an rsv object")
-    
     for a in [vol.NR, vol.NORMED]:
         if a is None:
             raise Exception("Cannot save rsv, has no data in it")
-    volout = h5py.File( filename,"w")
+    volout = h5py.File(filename, "w")
     if vol.NORMED.dtype != numpy.float32:
         logging.warning("rsv NORMED was not float32, converting")
         vol.NORMED = vol.NORMED.astype(numpy.float32)
-    volout.create_dataset( "signal",
-                           (vol.NR[0],vol.NR[1],vol.NR[2]),
-                           vol.NORMED.dtype,
-                           data = vol.NORMED,
-                           compression = 'gzip',
-                           compression_opts = 1)
-    volout.attrs['bounds'] = vol.bounds
-    volout.attrs['np'] = vol.np
+    volout.create_dataset(
+        "signal",
+        (vol.NR[0], vol.NR[1], vol.NR[2]),
+        vol.NORMED.dtype,
+        data=vol.NORMED,
+        compression="gzip",
+        compression_opts=1,
+    )
+    volout.attrs["bounds"] = vol.bounds
+    volout.attrs["np"] = vol.np
     for key, value in vol.metadata.items():
-        volout.attrs[key]=value
+        volout.attrs[key] = value
     volout.flush()
     volout.close()
 
 
 def mem():
-    """ debug the memory usage """
+    """debug the memory usage"""
     import os
-    os.system('ps v -p %s'%(os.getpid()))
-def readvol(filename, savespace=False ):
os.system("ps v -p %s" % (os.getpid())) + + +def readvol(filename, savespace=False): """ Read volume from a file returns an rsv object Take care to allocate and read to avoid temporaries """ - + volfile = h5py.File(filename) - if not 'signal' in list(volfile.keys()):#listnames(): - raise Exception("Your file %s is not an rsv"%(filename)) - sig = volfile['signal'] - bounds = volfile.attrs['bounds'] - np = volfile.attrs['np'] - vol = rsv( sig.shape, bounds, np ) + if "signal" not in list(volfile.keys()): # listnames(): + raise Exception("Your file %s is not an rsv" % (filename)) + sig = volfile["signal"] + bounds = volfile.attrs["bounds"] + np = volfile.attrs["np"] + vol = rsv(sig.shape, bounds, np) # allocate array empty - #mem() + # mem() if savespace: vol.SIG = sig else: - vol.SIG = numpy.empty( sig.shape, sig.dtype ) - #mem() - sig.read_direct( vol.SIG ) - #mem() + vol.SIG = numpy.empty(sig.shape, sig.dtype) + # mem() + sig.read_direct(vol.SIG) + # mem() for name, value in volfile.attrs.items(): vol.metadata[name] = value - #mem() - if 'monitor' in list(volfile.keys()):#listnames(): - mon = volfile['monitor'] + # mem() + if "monitor" in list(volfile.keys()): # listnames(): + mon = volfile["monitor"] assert mon.shape == vol.SIG.shape if savespace: vol.MON = mon else: - vol.MON= numpy.empty( mon.shape, mon.dtype) - mon.read_direct( vol.MON ) + vol.MON = numpy.empty(mon.shape, mon.dtype) + mon.read_direct(vol.MON) else: vol.MON = None vol.NORMED = vol.SIG - #mem() + # mem() if savespace: vol.hdf_file_object = volfile else: volfile.close() - #mem() + # mem() return vol - - - - diff --git a/ImageD11/rsv_mapper.py b/ImageD11/rsv_mapper.py index 104b68a4..a1d0db49 100644 --- a/ImageD11/rsv_mapper.py +++ b/ImageD11/rsv_mapper.py @@ -7,9 +7,16 @@ Transfers images into reciprocal space by pixel mapping """ -import numpy, logging -from ImageD11 import parameters, transform, indexing, \ - cImageD11, blobcorrector, rsv, ImageD11options +import numpy +from ImageD11 import ( + parameters, + transform, + indexing, + cImageD11, + blobcorrector, + rsv, + ImageD11options, +) class rsv_mapper(object): @@ -20,15 +27,21 @@ class rsv_mapper(object): frame are fixed. When rotating sample, just rotate these into crystal frame and use as array indices into volume """ - def __init__(self, dims, pars, ubi, - splinefile = None, - np=16, - border = 10, - omegarange = list(range(360)), - maxpix = None, - mask = None): + + def __init__( + self, + dims, + pars, + ubi, + splinefile=None, + np=16, + border=10, + omegarange=list(range(360)), + maxpix=None, + mask=None, + ): """ - Create a new mapper intance. It will transform images into + Create a new mapper intance. 
It will transform images into
         reciprocal space (has its own rsv object holding the space)
         dims - image dimensions
         par - ImageD11 parameter filename for experiment
@@ -39,13 +52,14 @@ def __init__(self, dims, pars, ubi,
         maxpix - value for saturated pixels to be ignored
         mask - fit2d style mask for removing bad pixels / border
         """
-        if len(dims)!=2: raise Exception("For 2D dims!")
+        if len(dims) != 2:
+            raise Exception("For 2D dims!")
         self.dims = dims
         print(dims)
         # Experiment parameters
-        if not isinstance( pars, parameters.parameters):
+        if not isinstance(pars, parameters.parameters):
             raise Exception("Pars should be an ImageD11 parameters object")
-        for key in ["distance", "wavelength"]: #etc
+        for key in ["distance", "wavelength"]:  # etc
             assert key in pars.parameters
         self.pars = pars
@@ -59,7 +73,7 @@ def __init__(self, dims, pars, ubi,
         self.mask = mask
         if self.mask is not None:
             assert self.mask.shape == self.dims, "Mask dimensions must match image"
-
+
         # spatial
         if splinefile is None:
             self.spatial = blobcorrector.perfect()
@@ -67,19 +81,18 @@ def __init__(self, dims, pars, ubi,
             self.spatial = blobcorrector.correctorclass(splinefile)
 
         # npixels
-        self.np = np
+        self.np = np
 
-        self.uspace = np*ubi
+        self.uspace = np * ubi
 
-        self.find_vol( border = border, omegarange = omegarange )
+        self.find_vol(border=border, omegarange=omegarange)
 
-        self.rsv.metadata['ubi'] = ubi
-        self.rsv.metadata['uspace'] = self.uspace
+        self.rsv.metadata["ubi"] = ubi
+        self.rsv.metadata["uspace"] = self.uspace
         # Make and cache the k vectors
         self.make_k_vecs()
-
-
-    def find_vol( self, border , omegarange ):
+
+    def find_vol(self, border, omegarange):
         """
         find limiting volume
         The four image corners over 360 degrees
@@ -92,76 +105,87 @@ def find_vol( self, border , omegarange ):
         """
         # Note that ImageD11 peaks are [slow, fast]
         # 1 2 3
-        # 4 5 6
+        # 4 5 6
         # 7 8 9
-        p1 = [ -border, self.dims[0]/2, self.dims[0]+border ,
-               self.dims[0]/2, self.dims[0]+border ,-border,
-               self.dims[0]+border, -border, self.dims[0]/2 ]
-        p2 = [ -border, self.dims[1]/2, self.dims[1]+border ,
-               -border, self.dims[1]/2, self.dims[1]+border ,
-               -border, self.dims[1]/2, self.dims[1]+border ]
+        p1 = [
+            -border,
+            self.dims[0] / 2,
+            self.dims[0] + border,
+            self.dims[0] / 2,
+            self.dims[0] + border,
+            -border,
+            self.dims[0] + border,
+            -border,
+            self.dims[0] / 2,
+        ]
+        p2 = [
+            -border,
+            self.dims[1] / 2,
+            self.dims[1] + border,
+            -border,
+            self.dims[1] / 2,
+            self.dims[1] + border,
+            -border,
+            self.dims[1] / 2,
+            self.dims[1] + border,
+        ]
         for i in range(9):
-            p1[i], p2[i] = self.spatial.correct( p1[i], p2[i] )
-        peaks = [p1*len(omegarange), p2*len(omegarange)]
-        om = numpy.array(list(omegarange)*9, numpy.float32)
-        tth, eta = transform.compute_tth_eta( peaks,
-                                              **self.pars.get_parameters() )
-        #print "tth",tth.min(),tth.max()
-        #print "eta",eta.min(),eta.max()
+            p1[i], p2[i] = self.spatial.correct(p1[i], p2[i])
+        peaks = [p1 * len(omegarange), p2 * len(omegarange)]
+        om = numpy.array(list(omegarange) * 9, numpy.float32)
+        tth, eta = transform.compute_tth_eta(peaks, **self.pars.get_parameters())
+        # print "tth",tth.min(),tth.max()
+        # print "eta",eta.min(),eta.max()
         assert om.shape == tth.shape
-        gv = transform.compute_g_vectors( tth, eta, om,
-                                          self.pars.get('wavelength'),
-                                          float(self.pars.get('wedge')),
-                                          float(self.pars.get('chi')))
-
-
+        gv = transform.compute_g_vectors(
+            tth,
+            eta,
+            om,
+            self.pars.get("wavelength"),
+            float(self.pars.get("wedge")),
+            float(self.pars.get("chi")),
+        )
+
         # Rotate g-vectors into target volume
-        hkls =
numpy.dot( self.uspace, gv ) + hkls = numpy.dot(self.uspace, gv) # print "Ranges for RSV" - bounds=numpy.zeros((3,2)) - npv =numpy.zeros(3) + bounds = numpy.zeros((3, 2)) + npv = numpy.zeros(3) for i in range(3): bounds[i][0] = numpy.floor(hkls[i].min()) bounds[i][1] = numpy.ceil(hkls[i].max()) - npv[i] = (bounds[i][1]-bounds[i][0]) + 1 + npv[i] = (bounds[i][1] - bounds[i][0]) + 1 self.bounds = bounds - self.rsv = rsv.rsv( npv , bounds=bounds, np=self.np ) + self.rsv = rsv.rsv(npv, bounds=bounds, np=self.np) # Cross your fingers and.... self.rsv.allocate_vol() - - - def make_k_vecs( self ): + def make_k_vecs(self): """ Generate the k vectors from the experiment parameters given in constructor """ - xim, yim = self.spatial.make_pixel_lut( self.dims ) - peaks = [ numpy.ravel(xim), numpy.ravel(yim) ] + xim, yim = self.spatial.make_pixel_lut(self.dims) + peaks = [numpy.ravel(xim), numpy.ravel(yim)] # First, x, is the slow pixel direction, should not change # when raveled - #for i in range(10): + # for i in range(10): # print "slow, fast" # print peaks[0][i],peaks[1][i] - assert abs(peaks[0][10]-peaks[0][0]) < 3 + assert abs(peaks[0][10] - peaks[0][0]) < 3 # Second, y, is the fast, should change by ~ 1 pixel per pixel - assert abs(peaks[1][10]-peaks[1][0]-10) < 3 - tth, eta = transform.compute_tth_eta( peaks, - **self.pars.get_parameters() ) - self.k = transform.compute_k_vectors(tth, eta, - self.pars.get('wavelength')) - + assert abs(peaks[1][10] - peaks[1][0] - 10) < 3 + tth, eta = transform.compute_tth_eta(peaks, **self.pars.get_parameters()) + self.k = transform.compute_k_vectors(tth, eta, self.pars.get("wavelength")) + # FIXME # This should be something like domega/dk where # dk [ k(omega=0) - k(omega=1) ] - - self.lorfac = numpy.ones( self.dims[0]*self.dims[1], - numpy.float32) - + self.lorfac = numpy.ones(self.dims[0] * self.dims[1], numpy.float32) - def add_image( self, om, data ): + def add_image(self, om, data): """ RSV = bounds of reciprocal space vol NR = dims of RSV @@ -172,42 +196,46 @@ def add_image( self, om, data ): """ dat = numpy.ravel(data).astype(numpy.float32) assert len(dat) == len(self.k[0]), "dimensioning issue" - + # hkl = ubi.( gtok. k ) - gvm = transform.compute_g_from_k(numpy.eye(3), - # this transform module sucks - om*float(self.pars.get('omegasign')), - wedge = float(self.pars.get('wedge')), - chi = float(self.pars.get('chi'))) - tmat = numpy.dot( self.uspace , gvm ) - hkls = numpy.dot( tmat , self.k ) + gvm = transform.compute_g_from_k( + numpy.eye(3), + # this transform module sucks + om * float(self.pars.get("omegasign")), + wedge=float(self.pars.get("wedge")), + chi=float(self.pars.get("chi")), + ) + tmat = numpy.dot(self.uspace, gvm) + hkls = numpy.dot(tmat, self.k) # Find a way to test if we are doing the transform OK - + # Bounds checks -# for i in range(3): -# assert hkls[i].min() > self.bounds[i][0], \ -# "%d %s %s"%(i, str(hkls[i].min()),str( self.bounds[i][0])) -# assert hkls[i].max() < self.bounds[i][1], \ -# "%d %s %s"%(i, str(hkls[i].max()),str( self.bounds[i][1])) + # for i in range(3): + # assert hkls[i].min() > self.bounds[i][0], \ + # "%d %s %s"%(i, str(hkls[i].min()),str( self.bounds[i][0])) + # assert hkls[i].max() < self.bounds[i][1], \ + # "%d %s %s"%(i, str(hkls[i].max()),str( self.bounds[i][1])) - NR = self.rsv.NR + NR = self.rsv.NR # hkls[0] is the slowest index. 
integer steps of NR[1]*NR[2] - ind = numpy.floor(hkls[0]+0.5-self.bounds[0][0]).astype(numpy.intp) - numpy.multiply( ind, NR[1]*NR[2] , ind ) + ind = numpy.floor(hkls[0] + 0.5 - self.bounds[0][0]).astype(numpy.intp) + numpy.multiply(ind, NR[1] * NR[2], ind) assert ind.dtype == numpy.intp # hkls[1] is faster. Steps by NR[2] only - numpy.add( ind, NR[2]*numpy.floor( - hkls[1] + 0.5 - self.bounds[1][0]).astype(numpy.intp), - ind ) - numpy.add( ind, numpy.floor( - hkls[2] + 0.5 - self.bounds[2][0]).astype(numpy.intp), - ind ) + numpy.add( + ind, + NR[2] * numpy.floor(hkls[1] + 0.5 - self.bounds[1][0]).astype(numpy.intp), + ind, + ) + numpy.add( + ind, numpy.floor(hkls[2] + 0.5 - self.bounds[2][0]).astype(numpy.intp), ind + ) # # # if self.maxpix is not None: - msk = numpy.where( dat > self.maxpix, 0, 1).astype(numpy.uint8) + msk = numpy.where(dat > self.maxpix, 0, 1).astype(numpy.uint8) else: msk = None @@ -217,115 +245,160 @@ def add_image( self, om, data ): msk = self.mask else: numpy.multiply(msk, numpy.ravel(self.mask), msk) - + # cases: # maxpix only == msk # mask only == msk # maxpix and mask == msk - # neither - + # neither if msk is not None: numpy.multiply(dat, msk, dat) - cImageD11.put_incr( self.rsv.SIG, - ind, - dat ) - cImageD11.put_incr( self.rsv.MON, - ind, - self.lorfac * msk) + cImageD11.put_incr(self.rsv.SIG, ind, dat) + cImageD11.put_incr(self.rsv.MON, ind, self.lorfac * msk) else: - cImageD11.put_incr( self.rsv.SIG, - ind, - dat ) - cImageD11.put_incr( self.rsv.MON, - ind, - self.lorfac) + cImageD11.put_incr(self.rsv.SIG, ind, dat) + cImageD11.put_incr(self.rsv.MON, ind, self.lorfac) return def writevol(self, filename): """ Save the volume in a hdf file """ - rsv.writevol( self.rsv, filename ) - + rsv.writevol(self.rsv, filename) from ImageD11 import ImageD11_file_series + def get_options(parser): """ Command line interface for making a mapping Add our options to a parser object """ - parser = ImageD11_file_series.get_options( parser ) - - parser.add_argument("-p", "--pars", action="store", - dest = "pars", default = None, - type=ImageD11options.ParameterFileType(mode='r'), - help = "ImageD11 parameter file for experiment") - - parser.add_argument("-o", "--output", action="store", - dest = "output", default = None, - type=ImageD11options.HdfFileType(mode='r'), - help = "Name of hdf5 output file") - - parser.add_argument("-s", "--splinefile", action="store", - dest = "spline", default = None, - type=ImageD11options.SplineFileType(mode='r'), - help = "Name of fit2d spline file for spatial dist") - - parser.add_argument("-u", "--ubifile", action="store", - dest = "ubifile", default = None, - type = ImageD11options.UbiFileType(mode='r'), - help = "Name of ubi file (first matrix is used)") - - parser.add_argument("-x", "--npixels", action="store", type=int, - dest = "npixels", default = 16, - help = "Number of pixels in reciprocal space map per integer hkl [16]") - - parser.add_argument("-i", "--images", action="store", type=int, - dest = "images", default = None, - help = "Number of images to process [all]") - - parser.add_argument("-b", "--border", action="store", type=int, - dest = "border", default = 10, - help = "Border around images to allocate space, px [10]") - parser.add_argument("-t", "--saturation", action="store", type=float, - dest = "maxpix", default = None, - help = "Saturation value for excluding pixels") - - - #parser.add_argument("-t", "--testcolfile", action="store", type="string", + parser = ImageD11_file_series.get_options(parser) + + parser.add_argument( + 
"-p", + "--pars", + action="store", + dest="pars", + default=None, + type=ImageD11options.ParameterFileType(mode="r"), + help="ImageD11 parameter file for experiment", + ) + + parser.add_argument( + "-o", + "--output", + action="store", + dest="output", + default=None, + type=ImageD11options.HdfFileType(mode="r"), + help="Name of hdf5 output file", + ) + + parser.add_argument( + "-s", + "--splinefile", + action="store", + dest="spline", + default=None, + type=ImageD11options.SplineFileType(mode="r"), + help="Name of fit2d spline file for spatial dist", + ) + + parser.add_argument( + "-u", + "--ubifile", + action="store", + dest="ubifile", + default=None, + type=ImageD11options.UbiFileType(mode="r"), + help="Name of ubi file (first matrix is used)", + ) + + parser.add_argument( + "-x", + "--npixels", + action="store", + type=int, + dest="npixels", + default=16, + help="Number of pixels in reciprocal space map per integer hkl [16]", + ) + + parser.add_argument( + "-i", + "--images", + action="store", + type=int, + dest="images", + default=None, + help="Number of images to process [all]", + ) + + parser.add_argument( + "-b", + "--border", + action="store", + type=int, + dest="border", + default=10, + help="Border around images to allocate space, px [10]", + ) + parser.add_argument( + "-t", + "--saturation", + action="store", + type=float, + dest="maxpix", + default=None, + help="Saturation value for excluding pixels", + ) + + # parser.add_argument("-t", "--testcolfile", action="store", type="string", # dest = "testcolfile", default=None, # help = "A columnfile to test geometry") - parser.add_argument("-c", "--subslice", action="store", type=int, - dest = "subslice", default=1, - help = "Number of omega subslices to repeat images") + parser.add_argument( + "-c", + "--subslice", + action="store", + type=int, + dest="subslice", + default=1, + help="Number of omega subslices to repeat images", + ) + + parser.add_argument( + "--maskfilename", + action="store", + type=str, + dest="maskfilename", + default=None, + help="Mask image (fit2d style)", + ) - parser.add_argument("--maskfilename", action="store", type=str, - dest = "maskfilename", default=None, - help = "Mask image (fit2d style)" ) - return parser - - - + def main(): """ A user interface """ import sys, time, os, logging + start = time.time() - root = logging.getLogger('') + root = logging.getLogger("") root.setLevel(logging.WARNING) try: from argparse import ArgumentParser + parser = ArgumentParser() - parser = get_options( parser ) + parser = get_options(parser) options, args = parser.parse_known_args() except SystemExit: raise @@ -333,101 +406,104 @@ def main(): parser.print_help() print("\nProblem with your options:") raise - + if options.output is None: print("You must supply an output file (-o vol.h5)") sys.exit() - if os.path.exists( options.output ): - print("I would overwrite your output file",options.output) + if os.path.exists(options.output): + print("I would overwrite your output file", options.output) print("If you really want that then delete it first and re-run") sys.exit() - + try: if options.pars is None: print("You must supply a parameter file, -p file.pars") sys.exit() pars = parameters.parameters() pars.loadparameters(options.pars) - print("Got parameters from",options.pars) + print("Got parameters from", options.pars) pd = pars.get_parameters() names = list(pd.keys()) names.sort() for name in names: - print("%30s %s"%(name, pd[name])) + print("%30s %s" % (name, pd[name])) except: - print("Problem with 
parameters:",options.pars) + print("Problem with parameters:", options.pars) raise try: if options.ubifile is None: print("You must supply an input ubifile") ubi = indexing.readubis(options.ubifile)[0] - print("UBI:\n",ubi) + print("UBI:\n", ubi) print("Cell parameters:") - print("%.5f %.5f %.5f %.4f %.4f %.4f" % \ - indexing.ubitocellpars(ubi)) + print("%.5f %.5f %.5f %.4f %.4f %.4f" % indexing.ubitocellpars(ubi)) except: - print("Problem with ubi file:",options.ubifile) + print("Problem with ubi file:", options.ubifile) raise if options.maskfilename is not None: from fabio.openimage import openimage + try: - mask = ( openimage( options.maskfilename ).data == 0 ) + mask = openimage(options.maskfilename).data == 0 except: - print("Problem with your mask image",options.maskfilename) + print("Problem with your mask image", options.maskfilename) raise - print("Using a mask from",options.maskfilename) - print("percent of image used %.3f"%(100.0*mask.sum()/mask.shape[0]/mask.shape[1])) + print("Using a mask from", options.maskfilename) + print( + "percent of image used %.3f" + % (100.0 * mask.sum() / mask.shape[0] / mask.shape[1]) + ) else: mask = None - - + first_image = True nimage = 0 - imagefiles = ImageD11_file_series.get_series_from_options( options, args ) + imagefiles = ImageD11_file_series.get_series_from_options(options, args) - print("Subslicing by",options.subslice) + print("Subslicing by", options.subslice) try: for fim in imagefiles: - - if first_image: # allocate volume, compute k etc - + + if first_image: # allocate volume, compute k etc + first_image = False - - mapper = rsv_mapper( fim.data.shape, - pars , ubi, - options.spline, - np = options.npixels, - border = options.border, - maxpix = options.maxpix, - mask = mask - # FIXME omegarange - ) - - logging.info( "Setting up time %.4f s"%(time.time()-start)) - - + + mapper = rsv_mapper( + fim.data.shape, + pars, + ubi, + options.spline, + np=options.npixels, + border=options.border, + maxpix=options.maxpix, + mask=mask + # FIXME omegarange + ) + + logging.info("Setting up time %.4f s" % (time.time() - start)) + ltp = time.time() - om = float(fim.header['Omega']) - oms = float(fim.header['OmegaStep']) + om = float(fim.header["Omega"]) + oms = float(fim.header["OmegaStep"]) for i in range(options.subslice): - print(".", end=' ') - omv = om + i*oms/options.subslice + print(".", end=" ") + omv = om + i * oms / options.subslice # ==1 : 0*s/1 # ==2 : 0*s/2 , 1*s/2 # ==3 : 0*s/3 , 1*s/3, 2*s/3 etc - mapper.add_image( omv, fim.data ) - - + mapper.add_image(omv, fim.data) + nimage = nimage + 1 - - print(" %d %.3f %.4f s, %.4f s"%(nimage, om, - time.time()-ltp, - time.time()-start)) + + print( + " %d %.3f %.4f s, %.4f s" + % (nimage, om, time.time() - ltp, time.time() - start) + ) if options.images is not None: if nimage >= options.images: break @@ -435,24 +511,21 @@ def main(): except KeyboardInterrupt: print("\nCaught a control-c") if nimage > 0: - print("Problem, trying to save volume so far to:",options.output) - mapper.writevol( options.output ) + print("Problem, trying to save volume so far to:", options.output) + mapper.writevol(options.output) print("Saved what I had") sys.exit() except: print("\nAn error occured") if nimage > 0: - print("Problem, trying to save volume so far to:",options.output) - mapper.writevol( options.output ) + print("Problem, trying to save volume so far to:", options.output) + mapper.writevol(options.output) print("Saved what I had") raise if nimage > 0: - mapper.writevol( options.output ) - - + 
mapper.writevol(options.output) - -if __name__=="__main__": +if __name__ == "__main__": main() diff --git a/ImageD11/saintraw.py b/ImageD11/saintraw.py index 3b5730ff..24ace27f 100644 --- a/ImageD11/saintraw.py +++ b/ImageD11/saintraw.py @@ -1,4 +1,3 @@ - from __future__ import print_function """ @@ -156,14 +155,13 @@ """ - - class saintraw(object): doc = docs - titles = [] + titles = [] formats = {} - helps = {} + helps = {} + def __init__(self, filename=None): """ filename = filename to read in @@ -171,7 +169,7 @@ def __init__(self, filename=None): self.parsedocs() if filename is not None: self.read(filename) - + def parsedocs(self): """ Parse the saint documentation for the Bruker format @@ -182,7 +180,7 @@ def parsedocs(self): if len(line.rstrip()) == 0: if title is not None: self.formats[title] = format - self.helps[title] = help + self.helps[title] = help self.titles.append(title) title = None format = None @@ -207,21 +205,21 @@ def parsedocs(self): n = 1 if n > 1: for j in range(n): - alltitles.append( t + "_%d" % (j) ) - allformats.append( f ) + alltitles.append(t + "_%d" % (j)) + allformats.append(f) else: - alltitles.append( t ) - allformats.append( f ) - assert f[0] in ["I","F"] + alltitles.append(t) + allformats.append(f) + assert f[0] in ["I", "F"] if f[0] == "I": for dummy in range(n): - funcs.append( int ) + funcs.append(int) if f[0] == "F": for dummy in range(n): - funcs.append( float ) + funcs.append(float) num = int(f[1:].split(".")[0]) for dummy in range(n): - slices.append( slice( i, i + num ) ) + slices.append(slice(i, i + num)) i += num self.alltitles = alltitles self.allformats = allformats @@ -229,13 +227,13 @@ def parsedocs(self): self.slices = slices assert len(funcs) == len(slices) assert len(slices) == len(alltitles) - + def read(self, filename): """ Read an ascii formatted saint reflection file """ self.data = {} - self.lines = open(filename,"r").readlines() + self.lines = open(filename, "r").readlines() for t in self.alltitles: self.data[t] = [] zipped = list(zip(self.alltitles, self.slices, self.funcs)) @@ -243,40 +241,38 @@ def read(self, filename): if line[0] == "!": # Comment line continue - for t,s,f in zipped: + for t, s, f in zipped: # Parse this line try: - self.data[t].append( f( line[s] ) ) + self.data[t].append(f(line[s])) except: - print(t,s,f) + print(t, s, f) raise def condition_filter(self, name, func): """ Remove the peaks according to condition """ - assert len(self.lines) == len(self.data[name] ) - indices = numpy.compress( func( numpy.array( self.data[name]) ) , - list(range(len(self.lines))) ) - self.take( indices ) + assert len(self.lines) == len(self.data[name]) + indices = numpy.compress( + func(numpy.array(self.data[name])), list(range(len(self.lines))) + ) + self.take(indices) def take(self, order): """ Put the peaks in the order given in order (indices) """ for t in list(self.data.keys()): - self.data[t] = numpy.take( self.data[t], - order) - self.lines = list( numpy.take( self.lines, - order)) - + self.data[t] = numpy.take(self.data[t], order) + self.lines = list(numpy.take(self.lines, order)) + def sort(self, name): """ Sort according to a column in self.data """ - order = numpy.argsort( self.data[name] ) - self.take(order) - + order = numpy.argsort(self.data[name]) + self.take(order) def write(self, filename): """ @@ -285,33 +281,35 @@ def write(self, filename): """ outf = open(filename, "w") for line in self.lines: - outf.write( line ) + outf.write(line) # raise Exception("Not implemented writing yet!") def tocolumnfile(self): """ 
Return a columnfile """ - cof = columnfile.newcolumnfile( self.alltitles ) - dlist = [ self.data[t] for t in self.alltitles ] - cof.bigarray = numpy.array( dlist, float ) - cof.nrows = len( self.data[ self.alltitles[0] ] ) - cof.ncols = len( self.alltitles ) + cof = columnfile.newcolumnfile(self.alltitles) + dlist = [self.data[t] for t in self.alltitles] + cof.bigarray = numpy.array(dlist, float) + cof.nrows = len(self.data[self.alltitles[0]]) + cof.ncols = len(self.alltitles) cof.set_attributes() return cof + if __name__ == "__main__": import sys, time + START = time.time() sra = saintraw() print("Making object", time.time() - START) - + START = time.time() sra.read(sys.argv[1]) print("Reading", time.time() - START) - - print(len(sra.data['IHKL_0'])) + + print(len(sra.data["IHKL_0"])) START = time.time() cra = sra.tocolumnfile() diff --git a/ImageD11/scale.py b/ImageD11/scale.py index cb9b6649..56226de5 100644 --- a/ImageD11/scale.py +++ b/ImageD11/scale.py @@ -1,11 +1,8 @@ - from __future__ import print_function ## Automatically adapted for numpy.oldnumeric Sep 06, 2007 by alter_code1.py - - # ImageD11_v0.4 Software for beamline ID11 # Copyright (C) 2005 Jon Wright # @@ -47,8 +44,9 @@ import numpy, fabio + class scale: - def __init__( self, im1, threshold = None): + def __init__(self, im1, threshold=None): """ Determines scale and offset values for images with respect to each other @@ -56,20 +54,21 @@ def __init__( self, im1, threshold = None): returns a, b """ lsqmat = numpy.zeros((2, 2), float) - dyda = numpy.ravel(im1).astype(float) + dyda = numpy.ravel(im1).astype(float) self.threshold = threshold if threshold is None: self.indices = None self.notindices = None if threshold is not None: - self.indices = numpy.compress(dyda > threshold, - numpy.arange(dyda.shape[0])) - self.notindices = numpy.compress(dyda <= threshold, - numpy.arange(dyda.shape[0])) - assert self.indices.shape[0] + self.notindices.shape[0] == \ - dyda.shape[0], 'problem with threshold' + self.indices = numpy.compress(dyda > threshold, numpy.arange(dyda.shape[0])) + self.notindices = numpy.compress( + dyda <= threshold, numpy.arange(dyda.shape[0]) + ) + assert ( + self.indices.shape[0] + self.notindices.shape[0] == dyda.shape[0] + ), "problem with threshold" dyda = numpy.take(dyda, self.indices) - lsqmat[0, 0] = numpy.sum(dyda*dyda) + lsqmat[0, 0] = numpy.sum(dyda * dyda) lsqmat[1, 0] = lsqmat[0, 1] = numpy.sum(dyda) lsqmat[1, 1] = dyda.shape[0] self.dyda = dyda @@ -78,21 +77,20 @@ def __init__( self, im1, threshold = None): except: print(lsqmat) raise - def scaleimage(self, im2): """ Return a copy of the image scaled to match the class """ grad, off = self.scale(im2) - new = im2/grad - off/grad - new = numpy.where(new<0, 0, new) - if self.notindices is None: + new = im2 / grad - off / grad + new = numpy.where(new < 0, 0, new) + if self.notindices is None: return new else: - numpy.put(new, self.notindices, 0. 
) + numpy.put(new, self.notindices, 0.0) return new - + def scale(self, im2): """ Fill out RHS and solve @@ -107,16 +105,14 @@ def scale(self, im2): ans = numpy.dot(self.inverse, [rhs0, rhs1]) return ans[0], ans[1] else: - usedata = numpy.take(numpy.ravel(im2) , self.indices) + usedata = numpy.take(numpy.ravel(im2), self.indices) rhs0 = numpy.sum(self.dyda * usedata.astype(float)) rhs1 = numpy.sum(usedata.astype(float)) ans = numpy.dot(self.inverse, [rhs0, rhs1]) return ans[0], ans[1] - -def scaleseries( target, stem, first, last, - thresh = None, - writeim = None ): + +def scaleseries(target, stem, first, last, thresh=None, writeim=None): """ Scale a series of [bruker] images to the target TODO - make it work with fabio file series @@ -128,24 +124,27 @@ def scaleseries( target, stem, first, last, print("# Using", scaler.indices.shape[0], "pixels above threshold") else: print("# Using all pixels") - print("# Number Filename multiplier(t=" + str(thresh) + \ - ") offset multiplier(all) offset") + print( + "# Number Filename multiplier(t=" + + str(thresh) + + ") offset multiplier(all) offset" + ) if writeim is None: # we only look to see - for i in range(first, last+1): + for i in range(first, last + 1): name = "%s.%04d" % (stem, i) secondimage = fabio.open(name) a, b = scaler.scale(secondimage.data) - print(i, name , a, b, end=' ') - else: # we correct the image - for i in range(first, last+1): + print(i, name, a, b, end=" ") + else: # we correct the image + for i in range(first, last + 1): name = "%s.%04d" % (stem, i) newname = "cor_%s.%04d" % (stem.split("/")[-1], i) secondimage = fabio.open(name) newdata = scaler.scaleimage(secondimage.data) # write out the file secondimage.data = newdata - secondimage.write( newname ) + secondimage.write(newname) print(name, " -> ", newname) sys.stdout.flush() @@ -153,10 +152,11 @@ def scaleseries( target, stem, first, last, if __name__ == "__main__": import sys + FIRSTIMAGE = fabio.open(sys.argv[1]) STEM = sys.argv[2] FIRST = int(sys.argv[3]) - LAST = int(sys.argv[4]) + LAST = int(sys.argv[4]) try: THRES = float(sys.argv[5]) except: @@ -166,4 +166,4 @@ def scaleseries( target, stem, first, last, except: WRIT = None - scaleseries( FIRSTIMAGE, STEM, FIRST, LAST, THRES, WRIT ) + scaleseries(FIRSTIMAGE, STEM, FIRST, LAST, THRES, WRIT) diff --git a/ImageD11/silxGui/silx_colfile.py b/ImageD11/silxGui/silx_colfile.py index c355c83b..d43b0cd8 100644 --- a/ImageD11/silxGui/silx_colfile.py +++ b/ImageD11/silxGui/silx_colfile.py @@ -1,13 +1,11 @@ - -from ImageD11.columnfile import columnfile +from ImageD11.columnfile import columnfile import sys, os -import numpy as np -import silx.gui.qt , silx.gui.plot +import silx.gui.qt, silx.gui.plot -class silxqtcolfile( object ): - def __init__(self, filename=None, xlabel=None, ylabel=None, zlabel=None ): +class silxqtcolfile(object): + def __init__(self, filename=None, xlabel=None, ylabel=None, zlabel=None): """ Reads an ImageD11 peak search output for applying masks on 2D scatter plots """ @@ -16,8 +14,8 @@ def __init__(self, filename=None, xlabel=None, ylabel=None, zlabel=None ): self.zlabel = None self.drawUI() self.loadColfile(filename) - - def drawUI( self ): + + def drawUI(self): """ Sets up the UI | cfname | load | save @@ -25,15 +23,15 @@ def drawUI( self ): | xaxis | yaxis | color | apply_mask | """ self.win = silx.gui.qt.QWidget() - self.scatter_widget = silx.gui.plot.ScatterView( backend='gl' ) + self.scatter_widget = silx.gui.plot.ScatterView(backend="gl") # import pdb; pdb.set_trace() if not 
self.scatter_widget._plot()._backend.isValid(): - self.scatter_widget = silx.gui.plot.ScatterView( backend='matplotlib' ) - self.scatter_widget.setColormap( silx.gui.colors.Colormap(name='viridis') ) + self.scatter_widget = silx.gui.plot.ScatterView(backend="matplotlib") + self.scatter_widget.setColormap(silx.gui.colors.Colormap(name="viridis")) self.scatter_widget.getMaskToolsWidget().parent().setFloating(True) self.scatter_widget.getMaskToolsWidget().show() # glayout.addWidget( self.scatter_widget, 3, 0, 1, 4 ) - + glayout = silx.gui.qt.QGridLayout(self.win) # 0 1 2 3 # fname load save 0 @@ -41,56 +39,56 @@ def drawUI( self ): # x y z domask 2 # plots - self.fnamelabel = silx.gui.qt.QLabel( 'Columnfile: ' ) - blc=silx.gui.qt.QPushButton( "Load colfile" ) - glayout.addWidget( self.fnamelabel, 0, 0, 1, 2 ) - blc.clicked.connect( self.loadColfile ) + self.fnamelabel = silx.gui.qt.QLabel("Columnfile: ") + blc = silx.gui.qt.QPushButton("Load colfile") + glayout.addWidget(self.fnamelabel, 0, 0, 1, 2) + blc.clicked.connect(self.loadColfile) glayout.addWidget(blc, 0, 2) - bsc=silx.gui.qt.QPushButton( "Save colfile" ) - bsc.clicked.connect( self.savecolfile ) + bsc = silx.gui.qt.QPushButton("Save colfile") + bsc.clicked.connect(self.savecolfile) glayout.addWidget(bsc, 0, 3) - self.pnamelabel = silx.gui.qt.QLabel( 'Parameter file: ' ) - blp=silx.gui.qt.QPushButton( "Load parfile" ) - glayout.addWidget( blp, 1, 3) + self.pnamelabel = silx.gui.qt.QLabel("Parameter file: ") + blp = silx.gui.qt.QPushButton("Load parfile") + glayout.addWidget(blp, 1, 3) blp.clicked.connect(self.loadparameters) - glayout.addWidget( self.pnamelabel, 1, 0, 1, 2 ) + glayout.addWidget(self.pnamelabel, 1, 0, 1, 2) - bxa=silx.gui.qt.QComboBox( None ) - bya=silx.gui.qt.QComboBox( None ) - bza=silx.gui.qt.QComboBox( None ) - self.axisboxes = [bxa,bya,bza] - self.ignoreselect= True + bxa = silx.gui.qt.QComboBox(None) + bya = silx.gui.qt.QComboBox(None) + bza = silx.gui.qt.QComboBox(None) + self.axisboxes = [bxa, bya, bza] + self.ignoreselect = True for b in self.axisboxes: - b.currentIndexChanged.connect( self.select ) - glayout.addWidget(silx.gui.qt.QLabel("x-plot"),2,0) - glayout.addWidget(silx.gui.qt.QLabel("y-plot"),2,1) - glayout.addWidget(silx.gui.qt.QLabel("color"),2,2) - glayout.addWidget(bxa,3,0) - glayout.addWidget(bya,3,1) - glayout.addWidget(bza,3,2) - - bdel=silx.gui.qt.QPushButton( "Delete selected" ) - bdel.clicked.connect( self.applymask ) - glayout.addWidget(bdel,2,3) + b.currentIndexChanged.connect(self.select) + glayout.addWidget(silx.gui.qt.QLabel("x-plot"), 2, 0) + glayout.addWidget(silx.gui.qt.QLabel("y-plot"), 2, 1) + glayout.addWidget(silx.gui.qt.QLabel("color"), 2, 2) + glayout.addWidget(bxa, 3, 0) + glayout.addWidget(bya, 3, 1) + glayout.addWidget(bza, 3, 2) + + bdel = silx.gui.qt.QPushButton("Delete selected") + bdel.clicked.connect(self.applymask) + glayout.addWidget(bdel, 2, 3) self.scatter_widget.show() self.win.show() def setTitles(self): - """ Read the colfile titles into the dropdowns """ - self.ignoreselect=True - for i,b in enumerate(self.axisboxes): + """Read the colfile titles into the dropdowns""" + self.ignoreselect = True + for i, b in enumerate(self.axisboxes): t = b.currentText() b.clear() - b.addItems( self.colfile.titles ) + b.addItems(self.colfile.titles) if t in self.colfile.titles: - b.setCurrentIndex( self.colfile.titles.index( t ) ) + b.setCurrentIndex(self.colfile.titles.index(t)) else: b.setCurrentIndex(i) - self.ignoreselect=False + self.ignoreselect = False - def 
select(self,col): + def select(self, col): """ Choose the x,y,z axes for plotting """ @@ -100,27 +98,30 @@ def select(self,col): self.scatter_widget.resetZoom() def savecolfile(self): - """ Write after editing """ - filename = silx.gui.qt.QFileDialog(None,"Columnfile").getSaveFileName() + """Write after editing""" + filename = silx.gui.qt.QFileDialog(None, "Columnfile").getSaveFileName() print(filename) try: - self.colfile.writefile( filename[0] ) + self.colfile.writefile(filename[0]) except: - m = silx.gui.qt.QMessageBox.about(self.scatter_widget, - "Fail", "Saving failed - bad filename?" ) + _ = silx.gui.qt.QMessageBox.about( + self.scatter_widget, "Fail", "Saving failed - bad filename?" + ) def loadColfile(self, filename=None): - """ Read in a new file """ + """Read in a new file""" if filename is None or filename is False: - filename = silx.gui.qt.QFileDialog(self.win,"Columnfile").getOpenFileName()[0] + filename = silx.gui.qt.QFileDialog( + self.win, "Columnfile" + ).getOpenFileName()[0] try: - c = columnfile( filename ) + c = columnfile(filename) self.colfile = c except: - print("problem opening file",filename) + print("problem opening file", filename) return - print("loaded file",filename) - self.fnamelabel.setText( "Columnfile: " + self.colfile.filename ) + print("loaded file", filename) + self.fnamelabel.setText("Columnfile: " + self.colfile.filename) self.setTitles() self.update() self.scatter_widget.resetZoom() @@ -128,39 +129,41 @@ def loadColfile(self, filename=None): self.win.activateWindow() def update(self): - """ Refreshes the plot """ + """Refreshes the plot""" self.x = self.colfile.getcolumn(self.axisboxes[0].currentText()) self.y = self.colfile.getcolumn(self.axisboxes[1].currentText()) self.z = self.colfile.getcolumn(self.axisboxes[2].currentText()) self.scatter_widget.getXAxis().setLabel(self.axisboxes[0].currentText()) self.scatter_widget.getYAxis().setLabel(self.axisboxes[1].currentText()) - self.scatter_widget.setGraphTitle('color: ' + self.axisboxes[2].currentText()) - self.scatter_widget.setData( self.x, self.y, self.z ) + self.scatter_widget.setGraphTitle("color: " + self.axisboxes[2].currentText()) + self.scatter_widget.setData(self.x, self.y, self.z) def applymask(self): - """ Applies the scatterplot mask to the colfile """ + """Applies the scatterplot mask to the colfile""" m = self.scatter_widget.getMaskToolsWidget().getSelectionMask() - print("Masking",(m!=0).sum()) - self.colfile.filter( m == 0 ) + print("Masking", (m != 0).sum()) + self.colfile.filter(m == 0) self.update() def loadparameters(self): - fname = silx.gui.qt.QFileDialog(self.win,"Parfile").getOpenFileName()[0] + fname = silx.gui.qt.QFileDialog(self.win, "Parfile").getOpenFileName()[0] if not os.path.exists(fname): - m = silx.gui.qt.QMessageBox.about(self.scatter_widget, - "Fail", "Setting parameters failed - bad filename?" ) - self.colfile.parameters.loadparameters( fname ) + _ = silx.gui.qt.QMessageBox.about( + self.scatter_widget, "Fail", "Setting parameters failed - bad filename?" 
+ ) + self.colfile.parameters.loadparameters(fname) self.colfile.updateGeometry() self.setTitles() - self.pnamelabel.setText("Parameters: "+fname) + self.pnamelabel.setText("Parameters: " + fname) self.update() -if __name__=="__main__": - app = silx.gui.qt.QApplication( sys.argv ) + +if __name__ == "__main__": + app = silx.gui.qt.QApplication(sys.argv) if len(sys.argv) > 1: - qp = silxqtcolfile( sys.argv[1] ) + qp = silxqtcolfile(sys.argv[1]) else: - qp = silxqtcolfile( ) + qp = silxqtcolfile() # Avoid segfault on exceptions sys.excepthook = silx.gui.qt.exceptionHandler app.exec_() diff --git a/ImageD11/silxGui/silx_plot3d.py b/ImageD11/silxGui/silx_plot3d.py index 62acb70b..05ff6c98 100644 --- a/ImageD11/silxGui/silx_plot3d.py +++ b/ImageD11/silxGui/silx_plot3d.py @@ -1,19 +1,13 @@ - - - -from ImageD11.columnfile import columnfile +from ImageD11.columnfile import columnfile import sys -import numpy as np -import silx.gui.qt , silx.gui.plot3d.SceneWindow +import silx.gui.qt, silx.gui.plot3d.SceneWindow from silx.gui.plot3d.tools.PositionInfoWidget import PositionInfoWidget from silx.gui.widgets.BoxLayoutDockWidget import BoxLayoutDockWidget - - if __name__ == "__main__": - colf = columnfile( sys.argv[1] ) - colf.parameters.loadparameters( sys.argv[2] ) + colf = columnfile(sys.argv[1]) + colf.parameters.loadparameters(sys.argv[2]) colf.updateGeometry() qapp = silx.gui.qt.QApplication([]) @@ -22,10 +16,10 @@ window = silx.gui.plot3d.SceneWindow.SceneWindow() sceneWidget = window.getSceneWidget() - sceneWidget.setBackgroundColor((0.1, 0.18, 0.08, 1.)) - sceneWidget.setForegroundColor((1., 1., 1., 1.)) - sceneWidget.setTextColor((0.5, 0.7, 0.7, 1.)) - sceneWidget.setProjection('orthographic') + sceneWidget.setBackgroundColor((0.1, 0.18, 0.08, 1.0)) + sceneWidget.setForegroundColor((1.0, 1.0, 1.0, 1.0)) + sceneWidget.setTextColor((0.5, 0.7, 0.7, 1.0)) + sceneWidget.setProjection("orthographic") positionInfo = PositionInfoWidget() positionInfo.setSceneWidget(sceneWidget) @@ -34,23 +28,23 @@ dock.setWidget(positionInfo) window.addDockWidget(silx.gui.qt.Qt.BottomDockWidgetArea, dock) - x,y,z,values = colf.gx, colf.gy, colf.gz, colf.avg_intensity + x, y, z, values = colf.gx, colf.gy, colf.gz, colf.avg_intensity scatter3d = silx.gui.plot3d.items.Scatter3D() scatter3d.setData(x, y, z, values) # Set scatter3d properties - scatter3d.getColormap().setName('viridis') # Use 'magma' colormap - scatter3d.setSymbol('.') # Use point markers + scatter3d.getColormap().setName("viridis") # Use 'magma' colormap + scatter3d.setSymbol(".") # Use point markers scatter3d.setSymbolSize(11) # Set the size of the markers # Add scatter3d to the scene - sceneWidget.addItem( scatter3d ) + sceneWidget.addItem(scatter3d) # Set scatter3d transform - SIZE=1 + SIZE = 1 scatter3d.setScale(SIZE, SIZE, SIZE) window.show() # Avoid segfault on exceptions sys.excepthook = silx.gui.qt.exceptionHandler - qapp.exec_() \ No newline at end of file + qapp.exec_() diff --git a/ImageD11/silxGui/silx_sptview.py b/ImageD11/silxGui/silx_sptview.py index 09eab0bc..8bed907a 100644 --- a/ImageD11/silxGui/silx_sptview.py +++ b/ImageD11/silxGui/silx_sptview.py @@ -1,42 +1,41 @@ +import sys +import fabio +import silx.gui.qt, silx.gui.plot +from ImageD11.peakmerge import peakmerger -import sys, os -import numpy as np, fabio -import silx.gui.qt , silx.gui.plot +qapp = None -from ImageD11.peakmerge import peakmerger -qapp=None class sptview(object): - def __init__(self, fname=None): self.pm = peakmerger() self.fname = fname - self.pm.readpeaks( fname ) 
+ self.pm.readpeaks(fname) self.select_image(0) self.drawUI() def select_image(self, i): - if i < 0 or i > len(self.pm.images)-1: + if i < 0 or i > len(self.pm.images) - 1: return False im = self.pm.images[i] self.currentnum = i j = im.imagenumber - self.pm.harvestpeaks( numlim=(j-.1,j+0.1) ) - self.frame = fabio.open(im.name) - self.pkx = [ p.x for p in self.pm.allpeaks ] - self.pky = [ p.y for p in self.pm.allpeaks ] - self.pkI = [ p.avg for p in self.pm.allpeaks ] + self.pm.harvestpeaks(numlim=(j - 0.1, j + 0.1)) + self.frame = fabio.open(im.name) + self.pkx = [p.x for p in self.pm.allpeaks] + self.pky = [p.y for p in self.pm.allpeaks] + self.pkI = [p.avg for p in self.pm.allpeaks] return True def next(self): - if self.select_image( self.currentnum + 1): + if self.select_image(self.currentnum + 1): self.changeframe() else: self.warn("No next image") def prev(self): - if self.select_image( self.currentnum - 1): + if self.select_image(self.currentnum - 1): self.changeframe() else: self.warn("No previous image") @@ -45,24 +44,34 @@ def drawUI(self): self.widget = silx.gui.qt.QWidget() self.widget.setWindowTitle("spot viewer") self.layout = silx.gui.qt.QGridLayout() - self.widget.setLayout( self.layout ) - self.spot_label = silx.gui.qt.QLabel( self.fname ) - self.framelabel = silx.gui.qt.QLabel( self.frame.filename ) - self.framelabel.setAlignment( silx.gui.qt.Qt.AlignCenter ) + self.widget.setLayout(self.layout) + self.spot_label = silx.gui.qt.QLabel(self.fname) + self.framelabel = silx.gui.qt.QLabel(self.frame.filename) + self.framelabel.setAlignment(silx.gui.qt.Qt.AlignCenter) self.layout.addWidget(self.spot_label, 0, 0, 1, 3) self.layout.addWidget(self.framelabel, 1, 1) - self.n = silx.gui.qt.QPushButton( "next" ) - self.p = silx.gui.qt.QPushButton( "prev" ) - self.n.clicked.connect( self.next ) - self.p.clicked.connect( self.prev ) - self.layout.addWidget(self.p, 1, 0 ) - self.layout.addWidget(self.n, 1, 2 ) - self.plot = silx.gui.plot.Plot2D( backend='gl') - self.imglabel = self.plot.addImage( self.frame.data, z=0, origin=(-0.5,-0.5), - colormap=silx.gui.colors.Colormap(name='magma')) - self.plotlabel = self.plot.addScatter( self.pky, self.pkx, self.pkI, z=1, symbol='+', - colormap=silx.gui.colors.Colormap(name='viridis')) - self.plot.getScatter( self.plotlabel ).setSymbolSize(10) + self.n = silx.gui.qt.QPushButton("next") + self.p = silx.gui.qt.QPushButton("prev") + self.n.clicked.connect(self.next) + self.p.clicked.connect(self.prev) + self.layout.addWidget(self.p, 1, 0) + self.layout.addWidget(self.n, 1, 2) + self.plot = silx.gui.plot.Plot2D(backend="gl") + self.imglabel = self.plot.addImage( + self.frame.data, + z=0, + origin=(-0.5, -0.5), + colormap=silx.gui.colors.Colormap(name="magma"), + ) + self.plotlabel = self.plot.addScatter( + self.pky, + self.pkx, + self.pkI, + z=1, + symbol="+", + colormap=silx.gui.colors.Colormap(name="viridis"), + ) + self.plot.getScatter(self.plotlabel).setSymbolSize(10) self.plot.setKeepDataAspectRatio(True) self.layout.addWidget(self.plot, 2, 0, 1, 3) self.widget.show() @@ -70,17 +79,18 @@ def drawUI(self): def changeframe(self): d = self.frame.data - im = self.plot.getImage( self.imglabel ) + im = self.plot.getImage(self.imglabel) im.setData(d) - pl = self.plot.getScatter( self.plotlabel ) - pl.setData( self.pky, self.pkx, self.pkI ) - self.framelabel.setText( self.frame.filename ) + pl = self.plot.getScatter(self.plotlabel) + pl.setData(self.pky, self.pkx, self.pkI) + self.framelabel.setText(self.frame.filename) def warn(self, message): 
silx.gui.qt.QMessageBox.warning(None, "Warning", message) -if __name__=="__main__": + +if __name__ == "__main__": qapp = silx.gui.qt.QApplication([]) - s=sptview( sys.argv[1] ) + s = sptview(sys.argv[1]) s.widget.show() sys.exit(qapp.exec_()) diff --git a/ImageD11/simplex.py b/ImageD11/simplex.py index 78958b2f..bf3bca55 100644 --- a/ImageD11/simplex.py +++ b/ImageD11/simplex.py @@ -1,4 +1,3 @@ - from __future__ import print_function #!/usr/bin/env python @@ -48,8 +47,9 @@ import copy import sys + class Simplex: - def __init__(self, testfunc, guess, increments, kR = -1, kE = 2, kC = 0.5): + def __init__(self, testfunc, guess, increments, kR=-1, kE=2, kC=0.5): """Initializes the simplex. INPUTS ------ @@ -92,7 +92,7 @@ def __init__(self, testfunc, guess, increments, kR = -1, kE = 2, kC = 0.5): self.errors.append(0) self.calculate_errors_at_vertices() - def minimize(self, epsilon = 0.0001, maxiters = 250, monitor = 1): + def minimize(self, epsilon=0.0001, maxiters=250, monitor=1): """Walks to the simplex down to a local minima. INPUTS ------ @@ -143,15 +143,18 @@ def minimize(self, epsilon = 0.0001, maxiters = 250, monitor = 1): S1 = 0.0 for vertex in range(0, self.numvars + 1): - S1 = S1 + (self.errors[vertex] - F2)**2 + S1 = S1 + (self.errors[vertex] - F2) ** 2 T = math.sqrt(S1 / self.numvars) # Optionally, print progress information if monitor: - print('\r' + 72 * ' ', end=' ') - print('\rIteration = %d Best = %f Worst = %f' % \ - (iter,self.errors[self.lowest],self.errors[self.highest]), end=' ') + print("\r" + 72 * " ", end=" ") + print( + "\rIteration = %d Best = %f Worst = %f" + % (iter, self.errors[self.lowest], self.errors[self.highest]), + end=" ", + ) sys.stdout.flush() if T <= epsilon: @@ -214,14 +217,20 @@ def minimize(self, epsilon = 0.0001, maxiters = 250, monitor = 1): def contract_simplex(self): for x in range(0, self.numvars): - self.guess[x] = self.kC * self.simplex[self.highest][x] + (1 - self.kC) * self.simplex[self.numvars + 1][x] + self.guess[x] = ( + self.kC * self.simplex[self.highest][x] + + (1 - self.kC) * self.simplex[self.numvars + 1][x] + ) return # expand: if P is vertex and Q is centroid, alpha-expansion is Q + alpha*(P-Q), # or (1 - alpha)*Q + alpha*P; default alpha is 2.0; agrees with NR def expand_simplex(self): for x in range(0, self.numvars): - self.guess[x] = self.kE * self.guess[x] + (1 - self.kE) * self.simplex[self.numvars + 1][x] + self.guess[x] = ( + self.kE * self.guess[x] + + (1 - self.kE) * self.simplex[self.numvars + 1][x] + ) return # reflect: if P is vertex and Q is centroid, reflection is Q + (Q-P) = 2Q - P, @@ -229,7 +238,10 @@ def expand_simplex(self): def reflect_simplex(self): # loop over variables for x in range(0, self.numvars): - self.guess[x] = self.kR * self.simplex[self.highest][x] + (1 - self.kR) * self.simplex[self.numvars + 1][x] + self.guess[x] = ( + self.kR * self.simplex[self.highest][x] + + (1 - self.kR) * self.simplex[self.numvars + 1][x] + ) # store reflected point in elem. 
N + 2
         self.simplex[self.numvars + 2][x] = self.guess[x]
         return
 
@@ -241,7 +253,9 @@ def multiple_contract_simplex(self):
             if vertex == self.lowest:
                 continue
             for x in range(0, self.numvars):
-                self.simplex[vertex][x] = 0.5 * (self.simplex[vertex][x] + self.simplex[self.lowest][x])
+                self.simplex[vertex][x] = 0.5 * (
+                    self.simplex[vertex][x] + self.simplex[self.lowest][x]
+                )
         self.calculate_errors_at_vertices()
         return
 
@@ -274,15 +288,23 @@ def calculate_errors_at_vertices(self):
             self.errors[vertex] = self.currenterror
         return
 
+
 def myfunc(args):
-    return abs(args[0] * args[0] * args[0] * 5 - args[1] * args[1] * 7 + math.sqrt(abs(args[0])) - 118)
+    return abs(
+        args[0] * args[0] * args[0] * 5
+        - args[1] * args[1] * 7
+        + math.sqrt(abs(args[0]))
+        - 118
+    )
+
 
 def main():
     s = Simplex(myfunc, [1, 1, 1], [2, 4, 6])
     values, err, iter = s.minimize()
-    print('args = ', values)
-    print('error = ', err)
-    print('iterations = ', iter)
+    print("args = ", values)
+    print("error = ", err)
+    print("iterations = ", iter)
+
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/ImageD11/sinograms/__init__.py b/ImageD11/sinograms/__init__.py
index 87c20973..a3b3b86b 100644
--- a/ImageD11/sinograms/__init__.py
+++ b/ImageD11/sinograms/__init__.py
@@ -1,6 +1,3 @@
-
-
-
 """
 
 Instructions:
@@ -35,4 +32,3 @@
 
 """
-
diff --git a/ImageD11/sinograms/assemble_label.py b/ImageD11/sinograms/assemble_label.py
index 0fe0406e..2d332402 100644
--- a/ImageD11/sinograms/assemble_label.py
+++ b/ImageD11/sinograms/assemble_label.py
@@ -1,6 +1,5 @@
-
 from __future__ import print_function
-import h5py, os, time, numpy as np
+import h5py, os, numpy as np
 
 """WARNING: work in progresss"""
 
@@ -11,35 +10,69 @@
 # ... this is not really needed, but saves redoing the pixel labelling code
 
-SCANMOTORS = ("diffrz", "diffrz_cen360", "diffrz_center", "fpico4", "fpico3", "diffty", "diffty_center",
-              "rot", "rot_cen360", "rot_center", "fpico6", "dty", "dty_center")
-HEADERMOTORS = ("diffty", "diffrz", "samtx", "samty", "samtz", "diffry", "samrx", "samry",
-                "dty", "rot", "pz", "px", "py", "shtz", "shty", "shtx")
+SCANMOTORS = (
+    "diffrz",
+    "diffrz_cen360",
+    "diffrz_center",
+    "fpico4",
+    "fpico3",
+    "diffty",
+    "diffty_center",
+    "rot",
+    "rot_cen360",
+    "rot_center",
+    "fpico6",
+    "dty",
+    "dty_center",
+)
+HEADERMOTORS = (
+    "diffty",
+    "diffrz",
+    "samtx",
+    "samty",
+    "samtz",
+    "diffry",
+    "samrx",
+    "samry",
+    "dty",
+    "rot",
+    "pz",
+    "px",
+    "py",
+    "shtz",
+    "shty",
+    "shtx",
+)
 
 
 def testready(dsobj):
-    """ assume they are finish if they exist. Might be still writing ??? """
+    """assume they are finished if they exist. 
Might still be writing ???"""
     done, todo = dsobj.check_sparse()
-    return (done>0) and (todo == 0)
-    
+    return (done > 0) and (todo == 0)
+
 
-def getsparse( dsobj, num, titles = ('row','col','intensity','nnz') ):
-    """ 
+def getsparse(dsobj, num, titles=("row", "col", "intensity", "nnz")):
+    """
     dsobj = DataSet object
     returns the pixels from the sparse segmentation
     """
-    with h5py.File( os.path.join( dsobj.analysispath, dsobj.sparsefiles[num]) , "r" ) as hin:
-        pixels = { name : hin[dsobj.limapath][name][:] for name in titles }
+    with h5py.File(
+        os.path.join(dsobj.analysispath, dsobj.sparsefiles[num]), "r"
+    ) as hin:
+        pixels = {name: hin[dsobj.limapath][name][:] for name in titles}
     return pixels
 
 
-def harvest_masterfile( dset, outname,
-                        scanmotors=SCANMOTORS,
-                        headermotors=HEADERMOTORS, ):
+def harvest_masterfile(
+    dset,
+    outname,
+    scanmotors=SCANMOTORS,
+    headermotors=HEADERMOTORS,
+):
     """
     dset = ImageD11.sinograms.dataset.DataSet object
     outname = sparse file to write
-    """    
+    """
     opts = {
         "chunks": (10000,),
         "maxshape": (None,),
@@ -48,31 +81,31 @@ def harvest_masterfile( dset, outname,
     }
     with h5py.File(outname, "a") as hout:
         hout.attrs["h5input"] = dset.masterfile
-        print("Harvesting",dset.masterfile,end=": ")
+        print("Harvesting", dset.masterfile, end=": ")
         with h5py.File(dset.masterfile, "r") as hin:
             for scan in dset.scans:
                 gin = hin[scan]
                 bad = False
-                for check in ('title','measurement','measurement/'+dset.detector):
+                for check in ("title", "measurement", "measurement/" + dset.detector):
                     if check not in hin[scan]:
-                        print(scan,"missing",check,'skipping')
+                        print(scan, "missing", check, "skipping")
                         bad = True
                 if bad:
                     print("Skipping", scan)
                     continue
-                title = hin[scan]["title"][()]
                 g = hout.require_group(scan)
+                g.title = hin[scan]["title"][()]
                 gm = g.require_group("measurement")
                 for m in scanmotors:  # vary : many
                     if m in gin["measurement"]:
-                        data = data=gin["measurement"][m][:]
-                        ds = gm.require_dataset(m, shape=data.shape, dtype = data.dtype )
+                        data = gin["measurement"][m][:]
+                        ds = gm.require_dataset(m, shape=data.shape, dtype=data.dtype)
                         ds[()] = data
                 gip = g.require_group("instrument/positioners")
                 for m in headermotors:  # fixed : scalar
-                    if "instrument/positioners/%s"%(m) in gin:
-                        data=gin["instrument/positioners"][m][()]
-                        ds = gip.require_dataset(m, shape = data.shape, dtype = data.dtype )
+                    if "instrument/positioners/%s" % (m) in gin:
+                        data = gin["instrument/positioners"][m][()]
+                        ds = gip.require_dataset(m, shape=data.shape, dtype=data.dtype)
                         ds[()] = data
                 try:
                     frms = gin["measurement"][dset.detector]
@@ -81,48 +114,52 @@ def harvest_masterfile( dset, outname,
                     print(list(gin))
                     print(list(gin["measurement"]))
                     print(dset.detector)
-                    raise 
+                    raise
                 g.attrs["itype"] = frms.dtype.name
                 g.attrs["nframes"] = frms.shape[0]
                 g.attrs["shape0"] = frms.shape[1]
                 g.attrs["shape1"] = frms.shape[2]
-                print(scan, end=', ')
+                print(scan, end=", ")
             print()
-    
+
         # Finished with master file. Now harvest the segmented files. 
idx = 0 - titles = ('row','col','intensity','nnz') - print('Loading pixels:',end=' ') + titles = ("row", "col", "intensity", "nnz") + print("Loading pixels:", end=" ") for scan in dset.scans: - g = hout.require_group( scan ) - for name in 'row', 'col': + g = hout.require_group(scan) + for name in "row", "col": if name not in g: - g.create_dataset(name, shape = (0,), dtype=np.uint16, **opts) + g.create_dataset(name, shape=(0,), dtype=np.uint16, **opts) if "intensity" not in g: - g.create_dataset("intensity", shape = (0,), dtype=g.attrs['itype'], **opts) - nfrm = g.attrs['nframes'] - g.require_dataset("nnz", shape = (nfrm,), dtype=np.uint32) - nstart = nread = npx = pstart = 0 + g.create_dataset( + "intensity", shape=(0,), dtype=g.attrs["itype"], **opts + ) + nfrm = g.attrs["nframes"] + g.require_dataset("nnz", shape=(nfrm,), dtype=np.uint32) + nstart = nread = pstart = 0 while nread < nfrm: - pixels = getsparse( dset, idx, titles ) - idx += 1 # loop over sparse files in this scan - nread = nstart + len(pixels['nnz']) # number of frames in this limafile - g['nnz'][nstart : nread] = pixels['nnz'] + pixels = getsparse(dset, idx, titles) + idx += 1 # loop over sparse files in this scan + nread = nstart + len(pixels["nnz"]) # number of frames in this limafile + g["nnz"][nstart:nread] = pixels["nnz"] nstart = nread - pread = pstart + len(pixels['row']) # number of pixels in this limafile - for name in 'row', 'col', 'intensity': - g[name].resize( (pread, ) ) + pread = pstart + len(pixels["row"]) # number of pixels in this limafile + for name in "row", "col", "intensity": + g[name].resize((pread,)) g[name][pstart:pread] = pixels[name] pstart = pread - print(scan,end=', ') + print(scan, end=", ") print() return outname - -def main( dsname, outname ): - dset = ImageD11.sinograms.dataset.load( dsname ) - harvest_masterfile( dset, outname ) - -if __name__=="__main__": + +def main(dsname, outname): + dset = ImageD11.sinograms.dataset.load(dsname) + harvest_masterfile(dset, outname) + + +if __name__ == "__main__": import sys - main( sys.argv[1], sys.argv[2] ) \ No newline at end of file + + main(sys.argv[1], sys.argv[2]) diff --git a/ImageD11/sinograms/dataset.py b/ImageD11/sinograms/dataset.py index 31c40ac1..fee30e03 100644 --- a/ImageD11/sinograms/dataset.py +++ b/ImageD11/sinograms/dataset.py @@ -1,10 +1,9 @@ - from __future__ import print_function, division import os, h5py, numpy as np import fast_histogram import logging - + """ TO DO: @@ -20,115 +19,120 @@ - indexing and reconstructing grains """ - + + def guess_chunks(name, shape): - if name == 'omega': - return ( shape[0], 1 ) - if name == 'dty': - return ( 1, shape[1] ) + if name == "omega": + return (shape[0], 1) + if name == "dty": + return (1, shape[1]) return shape - - + + class DataSet: - + # simple strings or ints - ATTRNAMES = ( "dataroot", "analysisroot", "sample", "dset", "shape", "dsname", - "datapath", "analysispath", "masterfile", - "limapath" - ) - STRINGLISTS = ( "scans", "imagefiles", "sparsefiles" ) + ATTRNAMES = ( + "dataroot", + "analysisroot", + "sample", + "dset", + "shape", + "dsname", + "datapath", + "analysispath", + "masterfile", + "limapath", + ) + STRINGLISTS = ("scans", "imagefiles", "sparsefiles") # sinograms - NDNAMES = ( "omega", "dty", "nnz", "frames_per_file", "nlm", "frames_per_scan" ) - - def __init__(self, - dataroot = ".", - analysisroot = ".", - sample = "sample", - dset = "dataset"): - """ The things we need to know to process data """ - - self.detector = 'eiger' # frelon3 - self.limapath = None # where is 
the data in the Lima files - - self.omegamotor = 'rot_center' # diffrz - self.dtymotor = 'dty' # diffty - - self.dataroot = dataroot # folder to find {sample}/{sample}_{dset} - self.analysisroot = analysisroot # where to write or find sparse data - self.sample = sample # from bliss path - self.dset = dset # from bliss path - - self.dsname = "_".join( (self.sample, self.dset ) ) - - self.datapath = os.path.join( self.dataroot, self.sample, self.dsname ) - self.analysispath = os.path.join( self.analysisroot, self.sample, self.dsname ) - self.masterfile = os.path.join( self.datapath, self.dsname + '.h5' ) - + NDNAMES = ("omega", "dty", "nnz", "frames_per_file", "nlm", "frames_per_scan") + + def __init__(self, dataroot=".", analysisroot=".", sample="sample", dset="dataset"): + """The things we need to know to process data""" + + self.detector = "eiger" # frelon3 + self.limapath = None # where is the data in the Lima files + + self.omegamotor = "rot_center" # diffrz + self.dtymotor = "dty" # diffty + + self.dataroot = dataroot # folder to find {sample}/{sample}_{dset} + self.analysisroot = analysisroot # where to write or find sparse data + self.sample = sample # from bliss path + self.dset = dset # from bliss path + + self.dsname = "_".join((self.sample, self.dset)) + + self.datapath = os.path.join(self.dataroot, self.sample, self.dsname) + self.analysispath = os.path.join(self.analysisroot, self.sample, self.dsname) + self.masterfile = os.path.join(self.datapath, self.dsname + ".h5") + # These are in order ! The order of the lists is important - all things should match. - self.scans = None # read from master or load from analysis - self.frames_per_scan = None # how many frames (and motor positions) in each scan row. - self.imagefiles = None # List of strings. w.r.t self.datapath - self.frames_per_file = None # how many frames in this file (Lima files) - self.sparsefiles = None # maps sparse files to self.imagefiles - + self.scans = None # read from master or load from analysis + self.frames_per_scan = ( + None # how many frames (and motor positions) in each scan row. + ) + self.imagefiles = None # List of strings. 
w.r.t self.datapath
+        self.frames_per_file = None  # how many frames in this file (Lima files)
+        self.sparsefiles = None  # maps sparse files to self.imagefiles
+
         self.shape = (0, 0)
         self.omega = None
         self.dty = None
-
-
+
     def __repr__(self):
         r = []
-        for name in "dataroot analysisroot sample dset".split():
-            r.append('%s = "%s"'%( name, getattr(self, name) ) )
-        r.append( 'shape = ( %d, %d)'%tuple(self.shape) )
+        for name in "dataroot analysisroot sample dset".split():
+            r.append('%s = "%s"' % (name, getattr(self, name)))
+        r.append("shape = ( %d, %d)" % tuple(self.shape))
         if self.scans is not None:
-            r.append( '# scans %d from %s to %s'%(
-                len(self.scans), self.scans[0], self.scans[-1] ))
-        return "\n".join(r)
+            r.append(
+                "# scans %d from %s to %s"
+                % (len(self.scans), self.scans[0], self.scans[-1])
+            )
+        return "\n".join(r)
 
-
     def compare(self, other):
-        '''Try to see if the load/save is working'''
+        """Try to see if the load/save is working"""
         from types import FunctionType
-        sattrs = set( [ name for name in vars(self) if name[0] != '_' ] )
-        oattrs = set( [ name for name in vars(self) if name[0] != '_' ] )
+
+        sattrs = set([name for name in vars(self) if name[0] != "_"])
+        oattrs = set([name for name in vars(other) if name[0] != "_"])
         if sattrs != oattrs:
-            logging.info('Attribute mismatch '+str(sattrs)+' != '+str(oattrs))
+            logging.info("Attribute mismatch " + str(sattrs) + " != " + str(oattrs))
             return False
         for a in sattrs:
-            s = getattr( self, a )
+            s = getattr(self, a)
            if isinstance(s, FunctionType):
                 continue
-            o = getattr( other, a )
+            o = getattr(other, a)
             t = type(s)
             if type(o) != type(s):
-                logging.info('Type mismatch %s %s'%(str(t),str(a)))
+                logging.info("Type mismatch %s %s" % (str(t), str(a)))
                 return False
             if t == np.ndarray:
                 if s.shape != o.shape:
-                    logging.info('Shape mismatch %s %s'%(str(s.shape), str(o.shape)))
+                    logging.info("Shape mismatch %s %s" % (str(s.shape), str(o.shape)))
                     return False
-                if ( s != o ).all():
-                    logging.info('Data mismatch '+str(a))
+                if (s != o).all():
+                    logging.info("Data mismatch " + str(a))
                     return False
             else:
                 if s != o:
-                    logging.info('Data mismatch ')
+                    logging.info("Data mismatch ")
                     return False
-        logging.info('Dataset objects seem to match!')
+        logging.info("Dataset objects seem to match!")
         return True
-
-
-
+
     def report(self):
         print(self)
-        print("# Collected %d missing %d"%(self.check_images()))
-        print("# Segmented %d missing %d"%(self.check_sparse()))
+        print("# Collected %d missing %d" % (self.check_images()))
+        print("# Segmented %d missing %d" % (self.check_sparse()))
 
-
     def import_all(self, scans=None, shape=None):
         # collect the data
-        self.import_scans( scans=scans )
+        self.import_scans(scans=scans)
         # lima frames
         self.import_imagefiles()
         # motor positions
@@ -140,112 +144,132 @@ def import_all(self, scans=None, shape=None):
             self.import_nnz()
         except:
             logging.info("nnz not available. 
Segmentation done?") - - - def import_scans(self, scans=None, hname = None): - """ Reads in the scans from the bliss master file """ + + def import_scans(self, scans=None, hname=None): + """Reads in the scans from the bliss master file""" if hname is None: hname = self.masterfile frames_per_scan = [] - with h5py.File( hname, 'r' ) as hin: + with h5py.File(hname, "r") as hin: if scans is None: - scans = [scan for scan in list(hin['/']) if - (scan.endswith('.1') and - ('measurement' in hin[scan]) and - (self.detector in hin[scan]['measurement'])) ] + scans = [ + scan + for scan in list(hin["/"]) + if ( + scan.endswith(".1") + and ("measurement" in hin[scan]) + and (self.detector in hin[scan]["measurement"]) + ) + ] goodscans = [] for scan in scans: - frames = hin[scan]['measurement'][self.detector] - if len(frames.shape)==3: # need 1D series of frames + frames = hin[scan]["measurement"][self.detector] + if len(frames.shape) == 3: # need 1D series of frames goodscans.append(scan) - frames_per_scan.append( frames.shape[0] ) + frames_per_scan.append(frames.shape[0]) else: - print('Bad scan', scan) + print("Bad scan", scan) self.scans = goodscans self.frames_per_scan = frames_per_scan - logging.info( 'imported %d scans from %s'%(len(self.scans),hname)) + logging.info("imported %d scans from %s" % (len(self.scans), hname)) return self.scans - - + def import_imagefiles(self): - """ Get the Lima file names from the bliss master file, also scan_npoints """ - npts = None + """Get the Lima file names from the bliss master file, also scan_npoints""" + # npts = None self.imagefiles = [] self.frames_per_file = [] - with h5py.File( self.masterfile, 'r' ) as hin: - bad = [ ] - for i, scan in enumerate( self.scans ): - if ('measurement' not in hin[scan]) or (self.detector not in hin[scan]['measurement']): - print('Bad scan', scan) + with h5py.File(self.masterfile, "r") as hin: + bad = [] + for i, scan in enumerate(self.scans): + if ("measurement" not in hin[scan]) or ( + self.detector not in hin[scan]["measurement"] + ): + print("Bad scan", scan) bad.append(scan) continue - frames = hin[scan]['measurement'][self.detector] + frames = hin[scan]["measurement"][self.detector] self.imageshape = frames.shape[1:] for vsrc in frames.virtual_sources(): - self.imagefiles.append( vsrc.file_name ) - self.frames_per_file.append( vsrc.src_space.shape[0] ) # not sure about this + self.imagefiles.append(vsrc.file_name) + self.frames_per_file.append( + vsrc.src_space.shape[0] + ) # not sure about this # check limapath if self.limapath is None: self.limapath = vsrc.dset_name assert self.limapath == vsrc.dset_name - self.frames_per_file = np.array( self.frames_per_file, int ) - self.sparsefiles = [ name.replace( '/', '_' ).replace( '.h5', '_sparse.h5' ) for name in - self.imagefiles ] - logging.info( 'imported %d lima filenames'%( np.sum(self.frames_per_file) ) ) - - - def import_motors_from_master(self): # could also get these from sparse files if saved - """ read the motors from the lima file + self.frames_per_file = np.array(self.frames_per_file, int) + self.sparsefiles = [ + name.replace("/", "_").replace(".h5", "_sparse.h5") + for name in self.imagefiles + ] + logging.info("imported %d lima filenames" % (np.sum(self.frames_per_file))) + + def import_motors_from_master( + self, + ): # could also get these from sparse files if saved + """read the motors from the lima file you need to import the imagefiles first these will be the motor positions to accompany the images """ - self.omega = [None,] * len(self.scans) - self.dty 
= [None,] * len(self.scans) - with h5py.File( self.masterfile, 'r' ) as hin: + self.omega = [ + None, + ] * len(self.scans) + self.dty = [ + None, + ] * len(self.scans) + with h5py.File(self.masterfile, "r") as hin: bad = [] - for i, scan in enumerate( self.scans ): + for i, scan in enumerate(self.scans): # Should always be there, if not, filter scans before you get to here - om = hin[scan][ 'measurement' ][ self.omegamotor ][()] + om = hin[scan]["measurement"][self.omegamotor][()] if len(om) == self.frames_per_scan[i]: - self.omega[i] = om - else: # hope the first point was good ? Probably corrupted MUSST data. - self.omega[i] = [om[0],] + self.omega[i] = om + else: # hope the first point was good ? Probably corrupted MUSST data. + self.omega[i] = [ + om[0], + ] bad.append(i) # this can be an array or a scalar - dty = hin[scan][ 'instrument/positioners' ][ self.dtymotor ] + dty = hin[scan]["instrument/positioners"][self.dtymotor] if len(dty.shape) == 0: - self.dty[i] = np.full( self.frames_per_scan[i], dty[()] ) + self.dty[i] = np.full(self.frames_per_scan[i], dty[()]) elif dty.shape[0] == self.frames_per_scan[i]: self.dty[i] = dty[:] else: # corrupted MUSST? - self.dty[i] = np.full( self.frames_per_scan[i], dty[0] ) + self.dty[i] = np.full(self.frames_per_scan[i], dty[0]) for b in bad: - dom = [ (abs( self.omega[i][0] - self.omega[b] ), i) for i in range(len(self.scans)) - if i not in bad ] - if len(dom)>0: - j = np.argmin( dom[0][1] ) - self.omega[b] = self.omega[j] # best match - print("replace bad scan omega",b, self.scans[b],"with",j, self.scans[j]) - logging.info( 'imported omega/dty' ) - - + dom = [ + (abs(self.omega[i][0] - self.omega[b]), i) + for i in range(len(self.scans)) + if i not in bad + ] + if len(dom) > 0: + j = np.argmin(dom[0][1]) + self.omega[b] = self.omega[j] # best match + print( + "replace bad scan omega", b, self.scans[b], "with", j, self.scans[j] + ) + logging.info("imported omega/dty") + def guess_shape(self): - """Guess the shape if it was not given """ - npts = np.sum( self.frames_per_scan ) - if len(self.scans) == 1: # probably fscan2d or f2scan - with h5py.File(self.masterfile,'r') as hin: + """Guess the shape if it was not given""" + npts = np.sum(self.frames_per_scan) + if len(self.scans) == 1: # probably fscan2d or f2scan + with h5py.File(self.masterfile, "r") as hin: s = hin[self.scans[0]] - title = s['title'].asstr()[()] - print('Scan title',title) - if title.split()[0] == 'fscan2d': - s0 = s['instrument/fscan_parameters/slow_npoints'][()] - s1 = s['instrument/fscan_parameters/fast_npoints'][()] - elif title.split()[0] == 'f2scan': + title = s["title"].asstr()[()] + print("Scan title", title) + if title.split()[0] == "fscan2d": + s0 = s["instrument/fscan_parameters/slow_npoints"][()] + s1 = s["instrument/fscan_parameters/fast_npoints"][()] + elif title.split()[0] == "f2scan": # good luck ? 
Assuming rotation was the inner loop here: - step = s['instrument/fscan_parameters/step_size'][()] - s1 = int( np.round( 360 / step ) ) + step = s["instrument/fscan_parameters/step_size"][()] + s1 = int(np.round(360 / step)) s0 = npts // s1 else: s0 = 1 @@ -254,23 +278,24 @@ def guess_shape(self): s0 = len(self.scans) s1 = npts // s0 self.shape = s0, s1 - if np.prod( self.shape ) != npts: + if np.prod(self.shape) != npts: print("Warning: irregular scan - might be bugs in here") print(npts, len(self.scans)) - self.omega = np.array( self.omega ) - self.dty = np.array(self.dty ) - logging.info( 'sinogram shape = ( %d , %d ) imageshape = ( %d , %d)'%( - self.shape[0], self.shape[1], self.imageshape[0], self.imageshape[1] ) ) - + self.omega = np.array(self.omega) + self.dty = np.array(self.dty) + logging.info( + "sinogram shape = ( %d , %d ) imageshape = ( %d , %d)" + % (self.shape[0], self.shape[1], self.imageshape[0], self.imageshape[1]) + ) def guessbins(self): ny, nomega = self.shape self.omin = self.omega.min() self.omax = self.omega.max() - if (self.omax - self.omin)>360: + if (self.omax - self.omin) > 360: # multi-turn scan... - self.omin = 0. - self.omax = 360. + self.omin = 0.0 + self.omax = 360.0 self.omega_for_bins = self.omega % 360 else: self.omega_for_bins = self.omega @@ -284,18 +309,23 @@ def guessbins(self): self.ystep = (self.ymax - self.ymin) / (ny - 1) else: self.ystep = 1 - self.obincens = np.linspace( self.omin, self.omax, nomega ) - self.ybincens = np.linspace( self.ymin, self.ymax, ny ) - self.obinedges = np.linspace( self.omin-self.ostep/2, self.omax + self.ostep/2, nomega + 1 ) - self.ybinedges = np.linspace( self.ymin-self.ystep/2, self.ymax + self.ystep/2, ny + 1 ) - - - def sinohist(self, weights=None, omega=None, dty=None, method='fast'): - """ Bin some data onto the sinogram histogram """ + self.obincens = np.linspace(self.omin, self.omax, nomega) + self.ybincens = np.linspace(self.ymin, self.ymax, ny) + self.obinedges = np.linspace( + self.omin - self.ostep / 2, self.omax + self.ostep / 2, nomega + 1 + ) + self.ybinedges = np.linspace( + self.ymin - self.ystep / 2, self.ymax + self.ystep / 2, ny + 1 + ) + + def sinohist(self, weights=None, omega=None, dty=None, method="fast"): + """Bin some data onto the sinogram histogram""" bins = len(self.obincens), len(self.ybincens) - rng = ( (self.obinedges[0], self.obinedges[-1]), - (self.ybinedges[0], self.ybinedges[-1]) ) - if isinstance( weights, np.ndarray): + rng = ( + (self.obinedges[0], self.obinedges[-1]), + (self.ybinedges[0], self.ybinedges[-1]), + ) + if isinstance(weights, np.ndarray): wt = weights.ravel() else: wt = weights @@ -303,110 +333,111 @@ def sinohist(self, weights=None, omega=None, dty=None, method='fast'): omega = self.omega_for_bins if dty is None: dty = self.dty - if method == 'numpy': - ret = np.histogram2d( omega.ravel(), dty.ravel(), - weights = wt, bins = bins, range=rng ) + if method == "numpy": + ret = np.histogram2d( + omega.ravel(), dty.ravel(), weights=wt, bins=bins, range=rng + ) histo = ret[0] - elif method == 'fast': - histo = fast_histogram.histogram2d( omega.ravel(), dty.ravel(), - weights = wt, bins = bins, range=rng ) + elif method == "fast": + histo = fast_histogram.histogram2d( + omega.ravel(), dty.ravel(), weights=wt, bins=bins, range=rng + ) return histo - def import_nnz(self): - """ Read the nnz arrays from the scans """ + """Read the nnz arrays from the scans""" nnz = [] for spname in self.sparsefiles: - with h5py.File( os.path.join( self.analysispath, spname ), "r" ) as hin: 
-                nnz.append( hin[self.limapath]['nnz'][:] )
-        self.nnz = np.concatenate( nnz ).reshape( self.shape ).astype( np.int32 )
-        logging.info('imported nnz, average %f'%(self.nnz.mean())) # expensive if you are not logging it.
-
-
-# def compute_pixel_labels(self):
-#    this should instead from from the pk2d file generated by sinograms/properties.py
-#        nlm = []
-#        for spname in self.sparsefiles:
-#            n, l = peaklabel.add_localmax_peaklabel( os.path.join( self.analysispath, spname ),
-#                                                     self.limapath )
-#            nlm.append(n)
-#        self.nlm = np.concatenate( nlm ).reshape( self.shape )
-
-
-#    def import_nlm(self):
-#    this should instead from from the pk2d file generated by sinograms/properties.py
-#        """ Read the Nlmlabels
-#        These are the number of localmax peaks per frame
-#        """
-#        nlm = []
-#        for spname in self.sparsefiles:
-#            with h5py.File( os.path.join( self.analysispath, spname ), "r" ) as hin:
-#                nlm.append( hin[self.limapath]['Nlmlabel'][:] )
-#        self.nlm = np.concatenate( nlm ).reshape( self.shape )
-#        logging.info('imported nlm, max %d'%(self.nlm.max()))
-
-
-    def check_files(self, path, filenames, verbose = 0):
-        """ See whether files are created or not """
+            with h5py.File(os.path.join(self.analysispath, spname), "r") as hin:
+                nnz.append(hin[self.limapath]["nnz"][:])
+        self.nnz = np.concatenate(nnz).reshape(self.shape).astype(np.int32)
+        logging.info(
+            "imported nnz, average %f" % (self.nnz.mean())
+        )  # expensive if you are not logging it.
+
+    # def compute_pixel_labels(self):
+    # this should instead come from the pk2d file generated by sinograms/properties.py
+    #     nlm = []
+    #     for spname in self.sparsefiles:
+    #         n, l = peaklabel.add_localmax_peaklabel( os.path.join( self.analysispath, spname ),
+    #                                                  self.limapath )
+    #         nlm.append(n)
+    #     self.nlm = np.concatenate( nlm ).reshape( self.shape )
+
+    # def import_nlm(self):
+    # this should instead come from the pk2d file generated by sinograms/properties.py
+    #     """ Read the Nlmlabels
+    #     These are the number of localmax peaks per frame
+    #     """
+    #     nlm = []
+    #     for spname in self.sparsefiles:
+    #         with h5py.File( os.path.join( self.analysispath, spname ), "r" ) as hin:
+    #             nlm.append( hin[self.limapath]['Nlmlabel'][:] )
+    #     self.nlm = np.concatenate( nlm ).reshape( self.shape )
+    #     logging.info('imported nlm, max %d'%(self.nlm.max()))
+
+    def check_files(self, path, filenames, verbose=0):
+        """See whether files are created or not"""
         # images collected
         done = 0
         missing = 0
         for fname in filenames:
-            fullname = os.path.join( path, fname )
-            if os.path.exists( fullname ):
+            fullname = os.path.join(path, fname)
+            if os.path.exists(fullname):
                 done += 1
             else:
                 missing += 1
-                if verbose>0:
-                    print("missing", fullname )
+                if verbose > 0:
+                    print("missing", fullname)
                     verbose -= 1
         return done, missing
-
-
+
     def check_images(self):
-        """ Is the experiment finished ? """
-        return self.check_files( self.datapath, self.imagefiles )
-
-
+        """Is the experiment finished ?"""
+        return self.check_files(self.datapath, self.imagefiles)
+
     def check_sparse(self):
-        """ Has the segmentation been done ? 
""" - return self.check_files( self.analysispath, self.sparsefiles, verbose=2 ) - - - def save( self, h5name, h5group = '/' ): - - ZIP = { 'compression': 'gzip' } - - with h5py.File( h5name, "a") as hout: - grp = hout[ h5group ] + """Has the segmentation been done ?""" + return self.check_files(self.analysispath, self.sparsefiles, verbose=2) + + def save(self, h5name, h5group="/"): + + ZIP = {"compression": "gzip"} + + with h5py.File(h5name, "a") as hout: + grp = hout[h5group] # Simple small objects for name in self.ATTRNAMES: - data = getattr( self, name, None ) + data = getattr(self, name, None) if data is not None: - grp.attrs[ name ] = data + grp.attrs[name] = data # The string lists for name in self.STRINGLISTS: - data = getattr( self, name, None ) + data = getattr(self, name, None) if data is not None and len(data): - sdata = np.array( data, "S" ) - ds = grp.require_dataset( name, - shape = sdata.shape, - chunks = sdata.shape, - dtype = h5py.string_dtype(), - **ZIP ) + sdata = np.array(data, "S") + ds = grp.require_dataset( + name, + shape=sdata.shape, + chunks=sdata.shape, + dtype=h5py.string_dtype(), + **ZIP + ) ds[:] = sdata # for name in self.NDNAMES: data = getattr(self, name, None) if data is not None: - data = np.asarray( data ) + data = np.asarray(data) try: chunks = guess_chunks(name, data.shape) - ds = grp.require_dataset( name, - shape = data.shape, - chunks = chunks, - dtype = data.dtype, - **ZIP ) + ds = grp.require_dataset( + name, + shape=data.shape, + chunks=chunks, + dtype=data.dtype, + **ZIP + ) ds[:] = data except: print(name) @@ -414,67 +445,70 @@ def save( self, h5name, h5group = '/' ): print(data.shape) print(chunks) raise - - def load( self, h5name, h5group = '/' ): - """ Recover this from a hdf5 file """ - with h5py.File( h5name, "r") as hin: - grp = hin[ h5group ] + def load(self, h5name, h5group="/"): + """Recover this from a hdf5 file""" + with h5py.File(h5name, "r") as hin: + grp = hin[h5group] for name in self.ATTRNAMES: if name in grp.attrs: - setattr( self, name, grp.attrs.get(name) ) - self.shape = tuple( self.shape ) # hum + setattr(self, name, grp.attrs.get(name)) + self.shape = tuple(self.shape) # hum for name in self.NDNAMES: if name in grp: data = grp[name][()] - setattr( self, name, data ) + setattr(self, name, data) for name in self.STRINGLISTS: if name in grp: stringlist = list(grp[name][()]) - if hasattr(stringlist[0], 'decode') or isinstance(stringlist[0], np.ndarray): + if hasattr(stringlist[0], "decode") or isinstance( + stringlist[0], np.ndarray + ): data = [s.decode() for s in stringlist] else: data = stringlist - setattr( self, name, data ) + setattr(self, name, data) self.guessbins() return self -def load( h5name, h5group = '/' ): - return DataSet().load( h5name, h5group ) +def load(h5name, h5group="/"): + return DataSet().load(h5name, h5group) + -def import_from_sparse( hname, - omegamotor='instrument/positioners/rot', - dtymotor= 'instrument/positioners/dty', - ): +def import_from_sparse( + hname, + omegamotor="instrument/positioners/rot", + dtymotor="instrument/positioners/dty", +): ds = DataSet() - with h5py.File(hname,'r') as hin: - scans = list(hin['/']) - order = np.argsort( [ float(v) for v in scans if v.endswith('.1')] ) - scans = [ scans[i] for i in order ] - dty = [ hin[scan][dtymotor][()] for scan in scans ] - omega = [ hin[scan][omegamotor][()] for scan in scans ] - nnz = [hin[scan]['nnz'][()] for scan in scans] -# nlm = [hin[scan]['Nlmlabel'][()] for scan in scans] + with h5py.File(hname, "r") as hin: + scans = 
list(hin["/"]) + order = np.argsort([float(v) for v in scans if v.endswith(".1")]) + scans = [scans[i] for i in order] + dty = [hin[scan][dtymotor][()] for scan in scans] + omega = [hin[scan][omegamotor][()] for scan in scans] + nnz = [hin[scan]["nnz"][()] for scan in scans] + # nlm = [hin[scan]['Nlmlabel'][()] for scan in scans] ds.scans = scans ds.nnz = nnz ds.nnz = np.array(nnz) ds.shape = ds.nnz.shape ds.omega = np.zeros(ds.nnz.shape, float) - for i,o in enumerate( omega ): - if isinstance( o, float ) or (len(o) == len(ds.nnz[i])): + for i, o in enumerate(omega): + if isinstance(o, float) or (len(o) == len(ds.nnz[i])): ds.omega[i] = o if len(o) > len(ds.nnz[i]): - ds.omega[i] = ds.omega[i-2] # guess zig zag + ds.omega[i] = ds.omega[i - 2] # guess zig zag # warning here - - ds.dty = np.zeros(ds.nnz.shape, float) - for i,o in enumerate( dty ): - if isinstance( o, float ) or (len(o) == len(ds.nnz[i])): + + ds.dty = np.zeros(ds.nnz.shape, float) + for i, o in enumerate(dty): + if isinstance(o, float) or (len(o) == len(ds.nnz[i])): ds.dty[i] = o else: - raise Exception('Cannot read %d dty %s %s'%(i, str(o), str(o.shape) )) -# assert ds.nlm.shape == ds.shape + raise Exception("Cannot read %d dty %s %s" % (i, str(o), str(o.shape))) + # assert ds.nlm.shape == ds.shape try: ds.guess_scans() except: @@ -485,6 +519,7 @@ def import_from_sparse( hname, print("warning, guessbins failed") return ds + # Example # s = dataset( # dataroot = "/data/visitor/ma5415/id11/20221027", @@ -494,25 +529,29 @@ def import_from_sparse( hname, # s.import_all() -def check( dataroot, analysisroot, sample, dset, destination, scans=None ): - - h5o = DataSet( dataroot = dataroot, analysisroot = analysisroot, sample = sample, dset = dset ) +def check(dataroot, analysisroot, sample, dset, destination, scans=None): + + h5o = DataSet( + dataroot=dataroot, analysisroot=analysisroot, sample=sample, dset=dset + ) h5o.import_all(scans=scans) - h5o.save( destination ) - + h5o.save(destination) + print("Checking: Read back from hdf5") - t = load( destination ) + t = load(destination) t.report() return t.compare(h5o) - -if __name__=="__main__": + + +if __name__ == "__main__": import sys + logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) - + dataroot = sys.argv[1] analysisroot = sys.argv[2] sample = sys.argv[3] dset = sys.argv[4] destination = sys.argv[5] - - check( dataroot, analysisroot, sample, dset, destination ) + + check(dataroot, analysisroot, sample, dset, destination) diff --git a/ImageD11/sinograms/lima_segmenter.py b/ImageD11/sinograms/lima_segmenter.py index c6e249b0..fde3dae0 100755 --- a/ImageD11/sinograms/lima_segmenter.py +++ b/ImageD11/sinograms/lima_segmenter.py @@ -1,4 +1,3 @@ - from __future__ import print_function, division """ Do segmentation of lima/eiger files with no notion of metadata @@ -17,24 +16,31 @@ # Code to clean the 2D image and reduce it to a sparse array: # things we might edit class SegmenterOptions: - + # These are the stuff that belong to us in the hdf5 file (in our group: lima_segmenter) - jobnames = ( 'cut','howmany','pixels_in_spot', - 'maskfile', 'bgfile', - 'cores_per_job', 'files_per_core') - + jobnames = ( + "cut", + "howmany", + "pixels_in_spot", + "maskfile", + "bgfile", + "cores_per_job", + "files_per_core", + ) + # There are things that DO NOT belong to us - datasetnames = ( 'limapath', 'analysispath', 'datapath', 'imagefiles', 'sparsefiles' ) - - def __init__(self, - cut = 1, # keep values abuve cut in first look at image - howmany = 100000, # max pixels per frame to keep 
-                  pixels_in_spot = 3,
-                  maskfile = "",
-                  bgfile = "",
-                  cores_per_job = 8,
-                  files_per_core = 8,
-                  ):
+    datasetnames = ("limapath", "analysispath", "datapath", "imagefiles", "sparsefiles")
+
+    def __init__(
+        self,
+        cut=1,  # keep values above cut in first look at image
+        howmany=100000,  # max pixels per frame to keep
+        pixels_in_spot=3,
+        maskfile="",
+        bgfile="",
+        cores_per_job=8,
+        files_per_core=8,
+    ):
         self.cut = cut
         self.howmany = howmany
         self.pixels_in_spot = pixels_in_spot
@@ -43,13 +49,17 @@ def __init__(self,
         self.bg = None
         self.files_per_core = files_per_core
         self.cores_per_job = cores_per_job
-
+
     def __repr__(self):
-        return "\n".join( ["%s:%s"%(name,getattr(self, name, None )) for name in
-                           self.jobnames + self.datasetnames ] )
-
+        return "\n".join(
+            [
+                "%s:%s" % (name, getattr(self, name, None))
+                for name in self.jobnames + self.datasetnames
+            ]
+        )
+
     def setup(self):
-        self.thresholds = tuple( [ self.cut*pow(2,i) for i in range(6) ] )
+        self.thresholds = tuple([self.cut * pow(2, i) for i in range(6)])
         # validate input
         if len(self.maskfile):
             m = fabio.open(self.maskfile).data
@@ -60,62 +70,64 @@ def setup(self):
             print("# Opened mask", self.maskfile)
         if len(self.bgfile):
             self.bg = fabio.open(self.bgfile).data
-
+
     def load(self, h5name, h5group):
-
-        with h5py.File( h5name, "r" ) as hin:
+
+        with h5py.File(h5name, "r") as hin:
             grp = hin[h5group]
             pgrp = grp.parent
             for name in self.jobnames:
                 if name in grp.attrs:
-                    setattr(self, name, grp.attrs.get( name ) )
+                    setattr(self, name, grp.attrs.get(name))
             for name in self.datasetnames:
                 # datasetnames = ( 'limapath', 'analysispath', 'datapath', 'imagefiles', 'sparsefiles' )
                 if name in pgrp.attrs:
-                    data = pgrp.attrs.get( name )
-                    setattr(self, name, data )
+                    data = pgrp.attrs.get(name)
+                    setattr(self, name, data)
                 elif name in pgrp:
                     data = pgrp[name][()]
-                    if name.endswith('s'): # plural
-                        if isinstance( data, np.ndarray ):
+                    if name.endswith("s"):  # plural
+                        if isinstance(data, np.ndarray):
                             data = list(data)
-                            if isinstance( data[0], np.ndarray) or isinstance(data[0], bytes):
+                            if isinstance(data[0], np.ndarray) or isinstance(
+                                data[0], bytes
+                            ):
                                 data = [x.decode() for x in data]
                     else:
                         data = str(data)
-                    setattr(self, name, data )
+                    setattr(self, name, data)
                 else:
-                    logging.warning("Missing " + name )
+                    logging.warning("Missing " + name)
         self.setup()
-
+
     def save(self, h5name, h5group):
-        logging.info("saving to "+h5name+"::"+h5group)
-        with h5py.File( h5name, "a" ) as hout:
-            grp = hout.require_group( h5group )
+        logging.info("saving to " + h5name + "::" + h5group)
+        with h5py.File(h5name, "a") as hout:
+            grp = hout.require_group(h5group)
             for name in self.jobnames:
-                value = getattr( self, name, None )
+                value = getattr(self, name, None)
                 print(name, value)
                 if value is not None:
-                    grp.attrs[ name ] = value
-
-
+                    grp.attrs[name] = value
+
+
 ##########################  should not need to change much below here
 
-import functools
 import numpy as np
-import hdf5plugin
 import h5py
 import fabio
 import numba
 
+
 # pip install ImageD11 --no-deps
 # if you do not have it yet:
-from ImageD11 import sparseframe, cImageD11
+from ImageD11 import sparseframe
 
 try:
     from bslz4_to_sparse import chunk2sparse
 except:
     chunk2sparse = None
 
+
 @numba.njit
 def select(img, mask, row, col, val, cut):
     # TODO: This is in now cImageD11.tosparse_{u16|f32}
     k = 0
     for s in range(img.shape[0]):
         for f in range(img.shape[1]):
-            if img[s, f]*mask[s,f] > cut:
+            if img[s, f] * mask[s, f] > cut:
                 row[k] = s
                 col[k] 
= f val[k] = img[s, f] @@ -168,8 +180,10 @@ def top_pixels(nnz, row, col, val, howmany, thresholds): break return n + OPTIONS = None # global. Nasty. + class frmtosparse: def __init__(self, mask, dtype): # cache the mallocs on this function. Should be one per process @@ -177,22 +191,23 @@ def __init__(self, mask, dtype): self.col = np.empty(mask.size, np.uint16) self.val = np.empty(mask.size, dtype) self.mask = mask + def __call__(self, frm, cut): nnz = select(frm, self.mask, self.row, self.col, self.val, cut) return nnz, self.row[:nnz], self.col[:nnz], self.val[:nnz] - def clean(nnz, row, col, val): global OPTIONS if nnz == 0: return None if nnz > OPTIONS.howmany: - nnz = top_pixels(nnz, row, col, val, OPTIONS.howmany, OPTIONS.thresholds) + nnz = top_pixels(nnz, row, col, val, OPTIONS.howmany, OPTIONS.thresholds) # Now get rid of the single pixel 'peaks' # (for the mallocs, data is copied here) - s = sparseframe.sparse_frame(row[:nnz].copy(), col[:nnz].copy(), - OPTIONS.mask.shape) + s = sparseframe.sparse_frame( + row[:nnz].copy(), col[:nnz].copy(), OPTIONS.mask.shape + ) s.set_pixels("intensity", val[:nnz].copy()) else: s = sparseframe.sparse_frame(row, col, OPTIONS.mask.shape) @@ -200,15 +215,16 @@ def clean(nnz, row, col, val): if OPTIONS.pixels_in_spot <= 1: return s # label them according to the connected objects - s.set_pixels('f32', s.pixels['intensity'].astype(np.float32)) - npk = sparseframe.sparse_connected_pixels( s, threshold=0, - data_name="f32", label_name="cp") + s.set_pixels("f32", s.pixels["intensity"].astype(np.float32)) + npk = sparseframe.sparse_connected_pixels( + s, threshold=0, data_name="f32", label_name="cp" + ) # only keep spots with more than 3 pixels ... -# mom = sparseframe.sparse_moments( s, -# intensity_name="f32", -# labels_name="cp" ) -# npx = mom[:, cImageD11.s2D_1] - npx = np.bincount(s.pixels['cp'], minlength = npk ) + # mom = sparseframe.sparse_moments( s, + # intensity_name="f32", + # labels_name="cp" ) + # npx = mom[:, cImageD11.s2D_1] + npx = np.bincount(s.pixels["cp"], minlength=npk) pxcounts = npx[s.pixels["cp"]] pxmsk = pxcounts >= OPTIONS.pixels_in_spot if pxmsk.sum() == 0: @@ -216,34 +232,40 @@ def clean(nnz, row, col, val): sf = s.mask(pxmsk) return sf + def reader(frms, mask, cut): """ iterator to read chunks or frames and segment them returns sparseframes """ - if (chunk2sparse is not None) and ('32008' in frms._filters) and ( - not frms.is_virtual) and (OPTIONS.bg is None): - print('# reading compressed chunks') - fun = chunk2sparse( mask, dtype = frms.dtype ) + if ( + (chunk2sparse is not None) + and ("32008" in frms._filters) + and (not frms.is_virtual) + and (OPTIONS.bg is None) + ): + print("# reading compressed chunks") + fun = chunk2sparse(mask, dtype=frms.dtype) for i in range(frms.shape[0]): - filters, chunk = frms.id.read_direct_chunk((i,0,0)) + filters, chunk = frms.id.read_direct_chunk((i, 0, 0)) npx, row, col, val = fun.coo(chunk, cut) - spf = clean( npx, row, col, val ) + spf = clean(npx, row, col, val) yield spf else: - fun = frmtosparse( mask, frms.dtype ) + fun = frmtosparse(mask, frms.dtype) for i in range(frms.shape[0]): frm = frms[i] if OPTIONS.bg is not None: frm = frm.astype(np.float32) - OPTIONS.bg - npx, row, col, val = fun( frm, cut ) - spf = clean( npx, row, col, val ) + npx, row, col, val = fun(frm, cut) + spf = clean(npx, row, col, val) yield spf -def segment_lima( args ): + +def segment_lima(args): """Does segmentation on a single hdf5 srcname, - destname, + destname, dataset """ srcname, destname, dataset = 
args @@ -258,7 +280,7 @@ def segment_lima( args ): with h5py.File(destname, "a") as hout: with h5py.File(srcname, "r") as hin: if dataset not in hin: - print("Missing",dataset,"in",srcname) + print("Missing", dataset, "in", srcname) return # TODO/fixme - copy some headers over print("# ", srcname, destname, dataset) @@ -299,29 +321,38 @@ def segment_lima( args ): npx += spf.nnz g.attrs["npx"] = npx end = time.time() - print("\n# Done", nframes,'frames',npx,'pixels','fps',nframes/(end-start) ) + print("\n# Done", nframes, "frames", npx, "pixels", "fps", nframes / (end - start)) return destname # the output file should be flushed and closed when this returns + OPTIONS = None -def main( options ): + +def main(options): global OPTIONS OPTIONS = options args = [] - files_per_job = options.cores_per_job * options.files_per_core # 64 files per job - start = options.jobid*files_per_job - end = min( (options.jobid+1)*files_per_job, len(options.imagefiles) ) + files_per_job = options.cores_per_job * options.files_per_core # 64 files per job + start = options.jobid * files_per_job + end = min((options.jobid + 1) * files_per_job, len(options.imagefiles)) for i in range(start, end): - args.append( ( os.path.join( options.datapath, options.imagefiles[i] ), # src - os.path.join( options.analysispath, options.sparsefiles[i] ), # dest - options.limapath ) ) + args.append( + ( + os.path.join(options.datapath, options.imagefiles[i]), # src + os.path.join(options.analysispath, options.sparsefiles[i]), # dest + options.limapath, + ) + ) if 1: import concurrent.futures - with concurrent.futures.ProcessPoolExecutor(max_workers=options.cores_per_job) as mypool: + + with concurrent.futures.ProcessPoolExecutor( + max_workers=options.cores_per_job + ) as mypool: donefile = sys.stdout - for fname in mypool.map( segment_lima, args, chunksize=1 ): + for fname in mypool.map(segment_lima, args, chunksize=1): donefile.write(fname + "\n") donefile.flush() else: @@ -329,35 +360,38 @@ def main( options ): fname = segment_lima(arg) print(fname) sys.stdout.flush() - + print("All done") - - - -def setup_slurm_array( dsname, dsgroup='/'): - """ Send the tasks to slurm """ - dso = dataset.load( dsname, dsgroup ) - nfiles = len(dso.sparsefiles) - dstlima = [ os.path.join( dso.analysispath, name ) for name in dso.sparsefiles ] + + +def setup_slurm_array(dsname, dsgroup="/"): + """Send the tasks to slurm""" + dso = dataset.load(dsname, dsgroup) + nfiles = len(dso.sparsefiles) + dstlima = [os.path.join(dso.analysispath, name) for name in dso.sparsefiles] done = 0 for d in dstlima: if os.path.exists(d): done += 1 - print("total files to process", nfiles, 'done', done) + print("total files to process", nfiles, "done", done) if done == nfiles: return None - sdir = os.path.join( dso.analysispath, 'slurm' ) - if not os.path.exists( sdir ): - os.makedirs( sdir ) - options = SegmenterOptions( ) - options.load( dsname, dsgroup + '/lima_segmenter' ) - + sdir = os.path.join(dso.analysispath, "slurm") + if not os.path.exists(sdir): + os.makedirs(sdir) + options = SegmenterOptions() + options.load(dsname, dsgroup + "/lima_segmenter") + files_per_job = options.files_per_core * options.cores_per_job - jobs_needed = math.ceil( nfiles / files_per_job ) - sbat = os.path.join( sdir, "lima_segmenter_slurm.sh" ) - command = "python3 -m ImageD11.sinograms.lima_segmenter segment %s $SLURM_ARRAY_TASK_ID"%(dsname) - with open(sbat ,"w") as fout: - fout.write( """#!/bin/bash + jobs_needed = math.ceil(nfiles / files_per_job) + sbat = os.path.join(sdir, 
"lima_segmenter_slurm.sh") + command = ( + "python3 -m ImageD11.sinograms.lima_segmenter segment %s $SLURM_ARRAY_TASK_ID" + % (dsname) + ) + with open(sbat, "w") as fout: + fout.write( + """#!/bin/bash #SBATCH --job-name=array-lima_segmenter #SBATCH --output=%s/lima_segmenter_%%A_%%a.out #SBATCH --error=%s/lima_segmenter_%%A_%%a.err @@ -371,46 +405,47 @@ def setup_slurm_array( dsname, dsgroup='/'): echo Running on $HOSTNAME : %s OMP_NUM_THREADS=1 %s > %s/lima_segmenter_$SLURM_ARRAY_TASK_ID.log 2>&1 date -"""%(sdir,sdir,jobs_needed, options.cores_per_job, command, command, sdir)) - logging.info("wrote "+sbat) +""" + % (sdir, sdir, jobs_needed, options.cores_per_job, command, command, sdir) + ) + logging.info("wrote " + sbat) return sbat -def setup( dsname, **kwds ): - dso = dataset.load( dsname ) - options = SegmenterOptions( **kwds ) - if 'eiger' in dso.limapath: - if 'cut' not in kwds: +def setup(dsname, **kwds): + dso = dataset.load(dsname) + options = SegmenterOptions(**kwds) + if "eiger" in dso.limapath: + if "cut" not in kwds: options.cut = 1 - if 'maskfile' not in kwds: + if "maskfile" not in kwds: options.maskfile = "/data/id11/nanoscope/Eiger/mask_20210428.edf" - elif 'frelon3' in dso.limapath: - if 'cut' not in kwds: - options.cut = 25, # keep values abuve cut in first look at image + elif "frelon3" in dso.limapath: + if "cut" not in kwds: + options.cut = (25,) # keep values abuve cut in first look at image else: print("I don't know what to do") - options.save( dsname, 'lima_segmenter' ) - return setup_slurm_array( dsname ) + options.save(dsname, "lima_segmenter") + return setup_slurm_array(dsname) + - def segment(): # Uses a hdffile from dataset.py to steer the processing - # everything is passing via this file. - # + # everything is passing via this file. + # h5name = sys.argv[2] - + # This assumes forking. To be investigated otherwise. 
options = SegmenterOptions() - options.load( h5name, 'lima_segmenter' ) - options.jobid = int( sys.argv[3] ) - main( options ) + options.load(h5name, "lima_segmenter") + options.jobid = int(sys.argv[3]) + main(options) + - if __name__ == "__main__": - - if sys.argv[1] == 'setup': - setup( sys.argv[2] ) - - if sys.argv[1] == 'segment': + + if sys.argv[1] == "setup": + setup(sys.argv[2]) + + if sys.argv[1] == "segment": segment() - diff --git a/ImageD11/sinograms/polefigures.py b/ImageD11/sinograms/polefigures.py index 2230e824..7e408de2 100644 --- a/ImageD11/sinograms/polefigures.py +++ b/ImageD11/sinograms/polefigures.py @@ -1,4 +1,3 @@ - """WARNING: work in progress""" @@ -13,79 +12,96 @@ import ImageD11.parameters -def main( pars, - dataset, - - ): +def main( + pars, + dataset, +): pass class splatter: - """ splats pixels onto polefigures """ - def __init__(self, - parfile, - dxfile = '/data/id11/nanoscope/Eiger/spatial_20210415_JW/e2dx.edf', - dyfile = '/data/id11/nanoscope/Eiger/spatial_20210415_JW/e2dy.edf'): + """splats pixels onto polefigures""" + + def __init__( + self, + parfile, + dxfile="/data/id11/nanoscope/Eiger/spatial_20210415_JW/e2dx.edf", + dyfile="/data/id11/nanoscope/Eiger/spatial_20210415_JW/e2dy.edf", + ): self.pardict = ImageD11.parameters.parameters(parfile).parameters - self.pardict['dxfile'] = dxfile - self.pardict['dyfile'] = dyfile - self.pLUT = ImageD11.transform.pixelLUT( self.pardict ) - - def process(self, dsfile): - ds = ImageD11.sinograms.dataset( dsfile ) - + self.pardict["dxfile"] = dxfile + self.pardict["dyfile"] = dyfile + self.pLUT = ImageD11.transform.pixelLUT(self.pardict) + def process(self, dsfile): + ds = ImageD11.sinograms.dataset(dsfile) - if 0: import numba - ds = ImageD11.sinograms.dataset.load('/data/visitor/blc14570/id11/20230425/PROCESSED_DATA/ds_Martensite500C_DTz50.h5') + + ds = ImageD11.sinograms.dataset.load( + "/data/visitor/blc14570/id11/20230425/PROCESSED_DATA/ds_Martensite500C_DTz50.h5" + ) ds.omega.shape, len(ds.sparsefiles) - chosen = ds.omega.shape[0]//2 + 1 - sps = ImageD11.sparseframe.SparseScan( os.path.join(ds.analysispath, ds.sparsefiles[chosen]), ds.limapath ) - pars = ImageD11.parameters.read_par_file( '/data/visitor/blc14570/id11/20230425/PROCESSED_DATA/FeBCC.par' ).parameters - pars['dxfile'] = '/data/id11/nanoscope/Eiger/spatial_20210415_JW/e2dx.edf' - pars['dyfile'] = '/data/id11/nanoscope/Eiger/spatial_20210415_JW/e2dy.edf' - plut = ImageD11.transform.PixelLUT( pars ) - ds = 2 * np.sin( np.radians( plut.tth / 2 ) ) / pars['wavelength'] + chosen = ds.omega.shape[0] // 2 + 1 + sps = ImageD11.sparseframe.SparseScan( + os.path.join(ds.analysispath, ds.sparsefiles[chosen]), ds.limapath + ) + pars = ImageD11.parameters.read_par_file( + "/data/visitor/blc14570/id11/20230425/PROCESSED_DATA/FeBCC.par" + ).parameters + pars["dxfile"] = "/data/id11/nanoscope/Eiger/spatial_20210415_JW/e2dx.edf" + pars["dyfile"] = "/data/id11/nanoscope/Eiger/spatial_20210415_JW/e2dy.edf" + plut = ImageD11.transform.PixelLUT(pars) + ds = 2 * np.sin(np.radians(plut.tth / 2)) / pars["wavelength"] a0 = 2.86 - uc = ImageD11.unitcell.unitcell( [a0,a0,a0,90,90,90,],'I') - uc.makerings( ds.max() ) - labels = np.zeros( ds.shape, dtype=np.uint32 ) - for i, dsr in enumerate( uc.ringds ): + uc = ImageD11.unitcell.unitcell( + [ + a0, + a0, + a0, + 90, + 90, + 90, + ], + "I", + ) + uc.makerings(ds.max()) + labels = np.zeros(ds.shape, dtype=np.uint32) + for i, dsr in enumerate(uc.ringds): tol = 4e-3 * ds + 1e-2 - m = abs( ds - dsr ) < tol + m = abs(ds - dsr) < 
tol labels[m] = i + 1 # eta values - etabin = np.round( ( plut.eta % 360 ) / 0.1 ).astype(int) + etabin = np.round((plut.eta % 360) / 0.1).astype(int) etabin.min(), etabin.max() - output_shape = (labels.max()+1, sps.shape[0], 3601) - pfs = np.zeros( output_shape, int) - ipf = labels[ sps.row, sps.col ] - ifrm = np.zeros( len(sps.row), int ) - for i in range( len(sps.ipt) - 1): - ifrm[ sps.ipt[i]: sps.ipt[i+1] ] = i - ieta = etabin[ sps.row, sps.col ] + output_shape = (labels.max() + 1, sps.shape[0], 3601) + pfs = np.zeros(output_shape, int) + ipf = labels[sps.row, sps.col] + ifrm = np.zeros(len(sps.row), int) + for i in range(len(sps.ipt) - 1): + ifrm[sps.ipt[i] : sps.ipt[i + 1]] = i + ieta = etabin[sps.row, sps.col] tth_step = plut.tth.max() / 3000 + @numba.njit - def accumulate( ipf, ifrm, ieta, intensity, output ): + def accumulate(ipf, ifrm, ieta, intensity, output): for j in range(ipf.size): - output[ ipf.flat[j], ifrm.flat[j], ieta.flat[j] ] += intensity[j] - accumulate( ipf, ifrm, ieta, sps.intensity, pfs ) - - tthpf = np.zeros( output_shape, int) - tthbin = np.round( plut.tth / tth_step ).astype(int) + output[ipf.flat[j], ifrm.flat[j], ieta.flat[j]] += intensity[j] + + accumulate(ipf, ifrm, ieta, sps.intensity, pfs) + + tthpf = np.zeros(output_shape, int) + tthbin = np.round(plut.tth / tth_step).astype(int) assert tthbin.max() <= 3001 - itth = tthbin[ sps.row, sps.col ] - accumulate( ipf, ifrm, itth, sps.intensity, tthpf ) - - f,a = pl.subplots(3,7, figsize=(21,7)) + itth = tthbin[sps.row, sps.col] + accumulate(ipf, ifrm, itth, sps.intensity, tthpf) + + f, a = pl.subplots(3, 7, figsize=(21, 7)) for ax, p, j in zip(a.ravel(), pfs[1:], range(len(pfs[1:]))): - vmx = max( p.max(), 1 ) - ax.imshow(p, norm=pl.matplotlib.colors.LogNorm(vmin=0.1, vmax=vmx)) - hkls = uc.ringhkls[ uc.ringds[j] ] - title=str(hkls[-1]) + " M=%d"%(len(hkls)) - ax.set( xlabel= 'eta', ylabel='omega', xticks = [], yticks = [], title=title ) - - \ No newline at end of file + vmx = max(p.max(), 1) + ax.imshow(p, norm=pl.matplotlib.colors.LogNorm(vmin=0.1, vmax=vmx)) + hkls = uc.ringhkls[uc.ringds[j]] + title = str(hkls[-1]) + " M=%d" % (len(hkls)) + ax.set(xlabel="eta", ylabel="omega", xticks=[], yticks=[], title=title) diff --git a/ImageD11/sinograms/pread.py b/ImageD11/sinograms/pread.py index dd360724..94a22b65 100644 --- a/ImageD11/sinograms/pread.py +++ b/ImageD11/sinograms/pread.py @@ -1,4 +1,3 @@ - """WARNING: work in progress - look at mpi and/or uncompressed data instead?""" from __future__ import print_function @@ -10,81 +9,88 @@ import multiprocessing import concurrent.futures -def read_segment( args ): + +def read_segment(args): hname, dset, shmd, start, end = args - tbl = ImageD11.sinograms.properties.pks_table.fromSHM( shmd ) - with h5py.File( open(hname, "rb"), 'r') as hin: + tbl = ImageD11.sinograms.properties.pks_table.fromSHM(shmd) + with h5py.File(open(hname, "rb"), "r") as hin: sys.stdout.flush() s = start - b = 1024*1024*10 + b = 1024 * 1024 * 10 assert end <= tbl.glabel.shape[0] assert end <= tbl.pk_props.shape[1] while s < end: - e = min( end, s+b ) - hin[dset]['pk_props'].read_direct( tbl.pk_props, - np.s_[:, s:e], np.s_[:, s:e] ) - hin[dset]['glabel'].read_direct( tbl.glabel, - np.s_[s:e], np.s_[s:e] ) + e = min(end, s + b) + hin[dset]["pk_props"].read_direct( + tbl.pk_props, np.s_[:, s:e], np.s_[:, s:e] + ) + hin[dset]["glabel"].read_direct(tbl.glabel, np.s_[s:e], np.s_[s:e]) s = e - total_bytes = tbl.pk_props[:,start:end].nbytes + tbl.glabel[start:end].nbytes + total_bytes = 
tbl.pk_props[:, start:end].nbytes + tbl.glabel[start:end].nbytes del tbl return start, end -def create( hname, dset ): - with h5py.File(hname, 'r') as hin: + +def create(hname, dset): + with h5py.File(hname, "r") as hin: tbl = ImageD11.sinograms.properties.pks_table() - tbl.npk = hin['pks2d/npk'][:] - tbl.nlabel = hin['pks2d'].attrs['nlabel'] - tbl.ipk = hin['pks2d/ipk'][:] - for name in 'pk_props', 'glabel': - dset = hin['pks2d'][name] - setattr( tbl, name, tbl.share( name, - shape = dset.shape, - dtype = dset.dtype ) ) + tbl.npk = hin["pks2d/npk"][:] + tbl.nlabel = hin["pks2d"].attrs["nlabel"] + tbl.ipk = hin["pks2d/ipk"][:] + for name in "pk_props", "glabel": + dset = hin["pks2d"][name] + setattr(tbl, name, tbl.share(name, shape=dset.shape, dtype=dset.dtype)) del dset return tbl -def pread( tbl, hname, dset='pks2d', nproc=0 ): + +def pread(tbl, hname, dset="pks2d", nproc=0): if nproc == 0: - nproc = len( os.sched_getaffinity( os.getpid() )) - print("Using", nproc, 'processes to read') + nproc = len(os.sched_getaffinity(os.getpid())) + print("Using", nproc, "processes to read") if tbl is None: - tbl = create( hname, dset ) + tbl = create(hname, dset) shmd = tbl.export() - n = shmd[ 'glabel' ][ 'shape' ][0] - se = list(range(0,n,n//nproc)) + n = shmd["glabel"]["shape"][0] + se = list(range(0, n, n // nproc)) se[-1] = n - args = [(hname, dset, shmd, start, end) for start, end in zip(se[:-1],se[1:])] + args = [(hname, dset, shmd, start, end) for start, end in zip(se[:-1], se[1:])] nbytes = 0 - with multiprocessing.Pool( nproc ) as pool: - for rb in pool.map( read_segment, args ): + with multiprocessing.Pool(nproc) as pool: + for rb in pool.map(read_segment, args): pass return tbl -def read2pipe( dest, hname, dset, selection ): - with h5py.File( open( hname, 'rb' ), 'r' ) as hin: - dest.send_bytes( hin[ dset ][ selection ][:].tobytes() ) + +def read2pipe(dest, hname, dset, selection): + with h5py.File(open(hname, "rb"), "r") as hin: + dest.send_bytes(hin[dset][selection][:].tobytes()) dest.close() - -def readpartial( args ): + + +def readpartial(args): ary, hname, dset, selection = args write_end, read_end = multiprocessing.Pipe() - p = multiprocessing.Process( target = read2pipe, args = ( write_end, hname, dset, selection ) ) + p = multiprocessing.Process( + target=read2pipe, args=(write_end, hname, dset, selection) + ) p.start() - output = ary[ selection ] - output[:] = np.frombuffer( read_end.recv_bytes(), dtype = output.dtype ).reshape(output.shape) + output = ary[selection] + output[:] = np.frombuffer(read_end.recv_bytes(), dtype=output.dtype).reshape( + output.shape + ) p.join() read_end.close() return selection - -def getse( shape, chunks, nproc ): + +def getse(shape, chunks, nproc): dim = len(shape) - n = [ (s+c-1) // c for s,c in zip(shape, chunks)] - print(n, 'chunks in the dataset', shape, chunks ) + n = [(s + c - 1) // c for s, c in zip(shape, chunks)] + print(n, "chunks in the dataset", shape, chunks) ax = np.argmax(n) - nperproc = n[ax]//nproc + nperproc = n[ax] // nproc start = 0 block = chunks[ax] slices = [] @@ -93,42 +99,42 @@ def getse( shape, chunks, nproc ): for axis in range(len(shape)): if axis == ax: end = start + block * nperproc - slc.append( slice( start, min(end, shape[ax] ) ) ) + slc.append(slice(start, min(end, shape[ax]))) start = end else: - slc.append( slice(None, None, None) ) - slices.append( tuple(slc) ) + slc.append(slice(None, None, None)) + slices.append(tuple(slc)) return slices - -def readN( hname, dset, NPROC=40 ): - with h5py.File(hname, 'r') as 
hin:
+
+def readN(hname, dset, NPROC=40):
+    with h5py.File(hname, "r") as hin:
         ds = hin[dset]
-        output = np.empty( ds.shape, ds.dtype )
+        output = np.empty(ds.shape, ds.dtype)
         chunks = ds.chunks
         se = getse(output.shape, chunks, NPROC)
         print(se)
-        with concurrent.futures.ThreadPoolExecutor( max_workers=NPROC ) as pool:
+        with concurrent.futures.ThreadPoolExecutor(max_workers=NPROC) as pool:
             jobs = {}
             for s in se:
-                jobs[ pool.submit( readpartial, (output, hname, dset, s ) ) ] = s
+                jobs[pool.submit(readpartial, (output, hname, dset, s))] = s
             for future in concurrent.futures.as_completed(jobs):
                 print(future.result())
-
 
-if __name__=="__main__":
-    hname = '/data/projects/3dipolyplast/NS_Analysis_jw/et12_z95_2/20230405/pks_ET12_7ns_slice_pz15_2.h5'
+if __name__ == "__main__":
+
+    hname = "/data/projects/3dipolyplast/NS_Analysis_jw/et12_z95_2/20230405/pks_ET12_7ns_slice_pz15_2.h5"
     import timeit
-    
+
     start = timeit.default_timer()
-    tbl = pread( None, hname )
+    tbl = pread(None, hname)
     end = timeit.default_timer()
     print("Reading took {:.2f}/s".format(end - start))
-    
+
     # Single threaded
     start = timeit.default_timer()
-    chk = ImageD11.sinograms.properties.pks_table.load( hname )
+    chk = ImageD11.sinograms.properties.pks_table.load(hname)
     end = timeit.default_timer()
     print("One core reading took {:.2f}/s".format(end - start))
@@ -137,4 +143,3 @@ def readN( hname, dset, NPROC=40 ):
     assert (tbl.glabel == chk.glabel).all()
     end = timeit.default_timer()
     print("Matches {:.2f}/s".format(end - start))
-
diff --git a/ImageD11/sinograms/properties.py b/ImageD11/sinograms/properties.py
index fd4f2a1c..fc62d2b3 100755
--- a/ImageD11/sinograms/properties.py
+++ b/ImageD11/sinograms/properties.py
@@ -1,4 +1,3 @@
-
 import os
 import logging
 import sys
@@ -7,7 +6,6 @@
 from timeit import default_timer
 import numpy as np
 import h5py
-import hdf5plugin
 import scipy.sparse
 import scipy.sparse.csgraph
 import ImageD11.sinograms.dataset
@@ -15,14 +13,14 @@
 import numba
 
 ### The first part of the code is all to run in parallel on a multicore machine
-# 
+#
 # The 2D sinogram needs to be processed. Each frame has 4 neighbors.
 # 2 in the same rotation and 2 in the previous and next rotation
 # Work is split up to be one process doing two rows and joining them
 # For each worker added you process a row twice
 #
 # A sparse matrix is built in shared memory.
-# 
+#
 # perhaps some refactoring is needed
 #
 # it is building a sparse matrix of (npks x npks) which tags the overlaps.
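A minimal sketch of that overlap bookkeeping, using the same scipy calls as the `use_scipy` branch of `pks_table.find_uniq` below (the peak counts and pairs here are invented):

```python
import numpy as np
import scipy.sparse
import scipy.sparse.csgraph

npks = 5                 # five 2D peaks in total
i = np.array([0, 1, 3])  # first peak id of each overlapping pair
j = np.array([1, 2, 4])  # second peak id of each overlapping pair
w = np.array([7, 3, 2])  # number of shared pixels tagging each overlap
coo = scipy.sparse.coo_matrix((w, (i, j)), shape=(npks, npks))
nlabel, glabel = scipy.sparse.csgraph.connected_components(
    coo, directed=False, return_labels=True
)
print(nlabel, glabel)  # 2 [0 0 0 1 1] -> peaks {0,1,2} and {3,4} merge
```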
@@ -41,53 +39,57 @@ def remove_shm_from_resource_tracker(): More details at: https://bugs.python.org/issue38119 """ self = None + def fix_register(name, rtype): if rtype == "shared_memory": return return resource_tracker._resource_tracker.register(name, rtype) + if resource_tracker.register is not fix_register: resource_tracker.register = fix_register + def fix_unregister(name, rtype): if rtype == "shared_memory": return return resource_tracker._resource_tracker.unregister(name, rtype) + if resource_tracker.unregister is not fix_register: resource_tracker.unregister = fix_register resource_tracker.unregister = fix_unregister if "shared_memory" in resource_tracker._CLEANUP_FUNCS: del resource_tracker._CLEANUP_FUNCS["shared_memory"] + + ###################################################################################### -NPROC = max( 1, int(os.environ['SLURM_CPUS_PER_TASK']) - 1 ) +NPROC = max(1, int(os.environ["SLURM_CPUS_PER_TASK"]) - 1) + class shared_numpy_array: """See: https://bugs.python.org/issue38119 The multiprocessing pool must stick around until the process exits """ - def __init__(self, ary=None, shape=None, dtype=None, shmname=None, - fill = None): + + def __init__(self, ary=None, shape=None, dtype=None, shmname=None, fill=None): if ary is not None: shape = ary.shape dtype = ary.dtype self.nbytes = ary.nbytes else: self.nbytes = np.prod(shape) * np.dtype(dtype).itemsize - + if shmname is None: - self.shm = shared_memory.SharedMemory( - create=True, size = self.nbytes ) - self.creator=True + self.shm = shared_memory.SharedMemory(create=True, size=self.nbytes) + self.creator = True else: - self.shm = shared_memory.SharedMemory( - create=False, - name = shmname ) - self.creator=False - self.array = np.ndarray( shape, dtype, buffer=self.shm.buf) + self.shm = shared_memory.SharedMemory(create=False, name=shmname) + self.creator = False + self.array = np.ndarray(shape, dtype, buffer=self.shm.buf) if ary is not None: self.array[:] = ary if fill is not None: self.array[:] = fill - + def __del__(self): del self.array self.shm.close() @@ -95,29 +97,27 @@ def __del__(self): try: self.shm.unlink() except Exception as e: - print('Error: ',e) - - def export(self): - return { 'shape' : self.array.shape, - 'dtype' : self.array.dtype, - 'shmname' : self.shm.name } - + print("Error: ", e) + def export(self): + return { + "shape": self.array.shape, + "dtype": self.array.dtype, + "shmname": self.shm.name, + } class tictoc: def __init__(self): self.t = default_timer() - def __call__(self, msg = '' ): + + def __call__(self, msg=""): t = default_timer() - print("%s : %.6f /s"%( msg, t - self.t ) ) + print("%s : %.6f /s" % (msg, t - self.t)) self.t = default_timer() - - - -def pairrow( s , row): +def pairrow(s, row): """ s = SparseScan @@ -126,60 +126,75 @@ def pairrow( s , row): [ idty, iomega, idty, iomega ] : nedge, array( (nedge, 3) ) (src, dest, npixels) """ - s.omegaorder = np.argsort( s.motors["omega"] ) # not mod 360 here + s.omegaorder = np.argsort(s.motors["omega"]) # not mod 360 here s.sinorow = row - olap = ImageD11.sparseframe.overlaps_linear( s.nnz.max()+1 ) + olap = ImageD11.sparseframe.overlaps_linear(s.nnz.max() + 1) pairs = {} for i in range(1, len(s.omegaorder)): - if (s.nnz[s.omegaorder[i]] == 0) or (s.nnz[s.omegaorder[i-1]] == 0): + if (s.nnz[s.omegaorder[i]] == 0) or (s.nnz[s.omegaorder[i - 1]] == 0): continue - f0 = s.getframe( s.omegaorder[i-1] ) - f1 = s.getframe( s.omegaorder[i] ) - ans = olap( f0.row, f0.col, f0.pixels['labels'], s.nlabels[ s.omegaorder[i-1] ], - f1.row, 
f1.col, f1.pixels['labels'], s.nlabels[ s.omegaorder[i] ] ) - pairs[ row, s.omegaorder[i-1], row, s.omegaorder[i] ] = ans + f0 = s.getframe(s.omegaorder[i - 1]) + f1 = s.getframe(s.omegaorder[i]) + ans = olap( + f0.row, + f0.col, + f0.pixels["labels"], + s.nlabels[s.omegaorder[i - 1]], + f1.row, + f1.col, + f1.pixels["labels"], + s.nlabels[s.omegaorder[i]], + ) + pairs[row, s.omegaorder[i - 1], row, s.omegaorder[i]] = ans return pairs - -def pairscans( s1, s2, omegatol = 0.051 ): - olap = ImageD11.sparseframe.overlaps_linear( max(s1.nnz.max(), s2.nnz.max())+1 ) - assert len(s1.nnz) == len(s2.nnz ) +def pairscans(s1, s2, omegatol=0.051): + olap = ImageD11.sparseframe.overlaps_linear(max(s1.nnz.max(), s2.nnz.max()) + 1) + assert len(s1.nnz) == len(s2.nnz) pairs = {} - omega_1 = s1.motors['omega'] % 360 - omega_2 = s2.motors['omega'] % 360 + omega_1 = s1.motors["omega"] % 360 + omega_2 = s2.motors["omega"] % 360 for i in range(len(s1.nnz)): # check omega angles match o1 = omega_1[i] - j = np.argmin( abs( omega_2 - o1 ) ) + j = np.argmin(abs(omega_2 - o1)) o2 = omega_2[j] - if abs( o1 - o2 ) > omegatol: + if abs(o1 - o2) > omegatol: # this frame has no neighbor continue if (s1.nnz[i] == 0) or (s2.nnz[j] == 0): continue - f0 = s1.getframe( i ) - f1 = s2.getframe( j ) - ans = olap( f0.row, f0.col, f0.pixels['labels'], s1.nlabels[ i ], - f1.row, f1.col, f1.pixels['labels'], s2.nlabels[ j ] ) - pairs[ s1.sinorow, i, s2.sinorow, j ] = ans + f0 = s1.getframe(i) + f1 = s2.getframe(j) + ans = olap( + f0.row, + f0.col, + f0.pixels["labels"], + s1.nlabels[i], + f1.row, + f1.col, + f1.pixels["labels"], + s2.nlabels[j], + ) + pairs[s1.sinorow, i, s2.sinorow, j] = ans return pairs -def props(scan, i, algorithm='lmlabel', wtmax=None ): +def props(scan, i, algorithm="lmlabel", wtmax=None): """ scan = sparseframe.SparseScan object i = sinogram row id : used for tagging pairs - algorithm = 'lmlabel' | 'cplabel' + algorithm = 'lmlabel' | 'cplabel' Labels the peaks with lmlabel Assumes a regular scan for labelling frames returns ( row, properties[(s1,sI,sRow,sCol,frame),:], pairs, scan ) """ scan.sinorow = i - getattr( scan, algorithm )( countall=False ) # labels all the pixels in the scan. + getattr(scan, algorithm)(countall=False) # labels all the pixels in the scan. 
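As a reading aid for the `r` array assembled next: its five rows hold intensity-weighted sums per peak, so centroids fall out by division, exactly as `pk2d` does later in this file. A toy table with assumed numbers:

```python
import numpy as np

# rows: s1 = pixel count, sI = sum(I), srI = sum(row * I),
# scI = sum(col * I), frame id -- two hypothetical peaks
r = np.array([[4, 9], [100, 50], [1020, 510], [2040, 1020], [0, 1]], np.int64)
s_raw = r[2] / r[1]  # weighted row centroid -> [10.2, 10.2]
f_raw = r[3] / r[1]  # weighted col centroid -> [20.4, 20.4]
```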
npks = scan.total_labels - r = np.empty( (5,npks), np.int64 ) + r = np.empty((5, npks), np.int64) s = 0 j0 = i * scan.shape[0] for j in range(scan.shape[0]): @@ -188,98 +203,112 @@ def props(scan, i, algorithm='lmlabel', wtmax=None ): f0 = scan.getframe(j) e = s + scan.nlabels[j] # [1:] means skip the background labels == 0 output - r[0,s:e] = np.bincount( f0.pixels['labels'] )[1:] - wt = f0.pixels['intensity'].astype(np.int64) + r[0, s:e] = np.bincount(f0.pixels["labels"])[1:] + wt = f0.pixels["intensity"].astype(np.int64) if wtmax is not None: m = wt > wtmax n = m.sum() if n > 0: wt[m] = wtmax # print(scan,'replaced',n) - signal = np.bincount( f0.pixels['labels'], weights=wt)[1:] + signal = np.bincount(f0.pixels["labels"], weights=wt)[1:] if signal.min() < 1: - print("Bad data",scan.hname, scan.scan,i,j, - scan.nlabels[j], scan.nnz[j],f0.pixels['intensity'].min()) - raise Exception( 'bad data' ) - r[1,s:e] = signal - r[2,s:e] = np.bincount( f0.pixels['labels'], weights=f0.row*wt )[1:] - r[3,s:e] = np.bincount( f0.pixels['labels'], weights=f0.col*wt )[1:] - r[4,s:e] = j + j0 + print( + "Bad data", + scan.hname, + scan.scan, + i, + j, + scan.nlabels[j], + scan.nnz[j], + f0.pixels["intensity"].min(), + ) + raise Exception("bad data") + r[1, s:e] = signal + r[2, s:e] = np.bincount(f0.pixels["labels"], weights=f0.row * wt)[1:] + r[3, s:e] = np.bincount(f0.pixels["labels"], weights=f0.col * wt)[1:] + r[4, s:e] = j + j0 s = e # Matrix entries for this scan with itself: - pairs = pairrow( scan, i ) + pairs = pairrow(scan, i) return r, pairs + ###### testing / debug if 0: + def checkprops(i): - hname = 'ds_MA4752_S4_2_XRD_DTL1z60_sparsefull.h5' + hname = "ds_MA4752_S4_2_XRD_DTL1z60_sparsefull.h5" ds = ImageD11.sinograms.dataset.load(hname) - scan = ImageD11.sparseframe.SparseScan( hname[3:], ds.scans[i] ) - return props( scan, i ) + scan = ImageD11.sparseframe.SparseScan(hname[3:], ds.scans[i]) + return props(scan, i) + r, p = checkprops(400) - for i,(k,(n,v)) in enumerate(p.items()): - if i < 3 or i > (len(p)-3): - print(i,k,n) + for i, (k, (n, v)) in enumerate(p.items()): + if i < 3 or i > (len(p) - 3): + print(i, k, n) print(v.T) ###### -def get_start_end( n, p ): - """ For splitting up the scan row over processes + +def get_start_end(n, p): + """For splitting up the scan row over processes n = number of jobs p = number of processes - + All processes must do at least two rows to get the overlaps between rows. The work sharing is based on these overlaps. - + rows : 0 1 2 3 4 5 etc. 
overlap 01 12 23 34 45 """ - overlaps = [ (i-1,i) for i in range(1,n) ] + overlaps = [(i - 1, i) for i in range(1, n)] assert len(overlaps) >= p joins_per_job = np.zeros(p, int) - for i,o in enumerate(overlaps): - joins_per_job[i%p] += 1 + for i, o in enumerate(overlaps): + joins_per_job[i % p] += 1 assert np.sum(joins_per_job) == len(overlaps) start = 0 - slices=[] + slices = [] for i in range(p): end = start + joins_per_job[i] ss = overlaps[start][0] - se = overlaps[end-1][1] - slices.append( (ss, se) ) + se = overlaps[end - 1][1] + slices.append((ss, se)) start += joins_per_job[i] return slices -def countse( se ): - """ counts the points in a block of start/end""" + +def countse(se): + """counts the points in a block of start/end""" k = 0 for s, e in se: - for i in range(s, e+1): + for i in range(s, e + 1): k += 1 return k + def testit(): - """ sanity check """ - for t in range(95,110): + """sanity check""" + for t in range(95, 110): pairs = set() - s = np.arange(t) r = set() - for st, en in get_start_end( t, NPROC): - last = None - for i in range(st,en+1): - x = s[i] # indexerror? + for st, en in get_start_end(t, NPROC): + for i in range(st, en + 1): r.add(i) if i > st: - pairs.add((i,i-1)) - assert len(pairs) == t-1 + pairs.add((i, i - 1)) + assert len(pairs) == t - 1 assert len(r) == t + + # testit() -def compute_storage( peaks ): - '''make the cumulative sums of peaks to figure out storage space + +def compute_storage(peaks): + """make the cumulative sums of peaks to figure out storage space holds the row, npks, nii, nij - ''' + """ P = {} M = {} for row, npks, nii, nij in peaks: @@ -292,23 +321,25 @@ def compute_storage( peaks ): else: P[row] = npks M[row] = nii, nij - ks = sorted( P.keys() ) - npk = np.array( [ (P[k],M[k][0],M[k][1]) for k in ks ] ) + ks = sorted(P.keys()) + npk = np.array([(P[k], M[k][0], M[k][1]) for k in ks]) return ks, npk + class pks_table: - def __init__(self, - npk=None, - ipk=None, - pk_props=None, - rc=None, - rpk=None, - glabel=None, - nlabel=0, - use_shm=False, - ): + def __init__( + self, + npk=None, + ipk=None, + pk_props=None, + rc=None, + rpk=None, + glabel=None, + nlabel=0, + use_shm=False, + ): """ - Cases: + Cases: Create from npks counting -> here Read from a file -> classmethod pks_table.load( h5name ) Read from shared memory -> classmethod pks_table.fromSHM( h5name ) @@ -324,15 +355,15 @@ def __init__(self, self.shared = {} # otherwise create if self.npk is not None: - self.create( self.npk ) - + self.create(self.npk) + def share(self, name, *args, **kwds): """puts the array into shared memory returns the shared copy """ - self.shared[name] = shared_numpy_array( *args, **kwds ) + self.shared[name] = shared_numpy_array(*args, **kwds) return self.shared[name].array - + def create(self, npk): # [ nscans, 3 ] # number_of_peaks_in_scan @@ -341,239 +372,257 @@ def create(self, npk): self.npk = npk s = npk.sum(axis=0) # pointers to peak tables per scan - ipk = np.empty( npk.shape[0]+1, int ) + ipk = np.empty(npk.shape[0] + 1, int) ipk[0] = 0 - ipk[1:] = np.cumsum( npk[:,0] ) - self.ipk = self.share( 'ipk', ipk ) + ipk[1:] = np.cumsum(npk[:, 0]) + self.ipk = self.share("ipk", ipk) # pointers to r/c positions - rpk = np.empty( npk.shape[0]+1, int ) + rpk = np.empty(npk.shape[0] + 1, int) rpk[0] = 0 - rpk[1:] = np.cumsum( npk[:,1]+npk[:,2] ) - self.rpk = self.share( 'rpk', rpk ) - self.pk_props = self.share( 'pk_props', shape=(5, s[0]), dtype=np.int64 ) - self.rc = self.share( 'rc', shape=(3, s[1]+s[2]), dtype=np.int64 ) - + rpk[1:] = np.cumsum(npk[:, 
1] + npk[:, 2]) + self.rpk = self.share("rpk", rpk) + self.pk_props = self.share("pk_props", shape=(5, s[0]), dtype=np.int64) + self.rc = self.share("rc", shape=(3, s[1] + s[2]), dtype=np.int64) + def export(self): - return { name : self.shared[name].export() for name in self.shared } - - def __del__(self): + return {name: self.shared[name].export() for name in self.shared} + + def __del__(self): del self.ipk del self.rpk del self.pk_props del self.rc names = list(self.shared.keys()) for name in names: - o = self.shared.pop( name ) + o = self.shared.pop(name) del o - - def guesschunk(self, ar, m=64*40): + + def guesschunk(self, ar, m=64 * 40): return None """ for parallel / compressed """ chunk = list(ar.shape) nbytes = np.prod(chunk) * ar.dtype.itemsize - if (nbytes // m) < pow(2,16): - print('Guessing 1 chunk for', ar.shape, ar.dtype) + if (nbytes // m) < pow(2, 16): + print("Guessing 1 chunk for", ar.shape, ar.dtype) return tuple(chunk) - axsplit = np.argmax( chunk ) + axsplit = np.argmax(chunk) n = chunk[axsplit] - chunk[axsplit] = max(1, n//m) - print('Guessing chunks for', ar.shape, ar.dtype, chunk) + chunk[axsplit] = max(1, n // m) + print("Guessing chunks for", ar.shape, ar.dtype, chunk) return tuple(chunk) - - def save(self, h5name, group='pks2d', rc = False): - opts = {} # No compression is faster - with h5py.File( h5name, 'a' ) as hout: - grp = hout.require_group( group ) - ds = grp.require_dataset( name = 'ipk', - shape = self.ipk.shape, - chunks = self.guesschunk( self.ipk ), - dtype = self.ipk.dtype, **opts ) + + def save(self, h5name, group="pks2d", rc=False): + opts = {} # No compression is faster + with h5py.File(h5name, "a") as hout: + grp = hout.require_group(group) + ds = grp.require_dataset( + name="ipk", + shape=self.ipk.shape, + chunks=self.guesschunk(self.ipk), + dtype=self.ipk.dtype, + **opts + ) ds[:] = self.ipk - ds.attrs['description']='pointer to start of a scan' - ds = grp.require_dataset( name = 'pk_props', - shape = self.pk_props.shape, - chunks = self.guesschunk( self.pk_props ), - dtype = self.pk_props.dtype, **opts ) + ds.attrs["description"] = "pointer to start of a scan" + ds = grp.require_dataset( + name="pk_props", + shape=self.pk_props.shape, + chunks=self.guesschunk(self.pk_props), + dtype=self.pk_props.dtype, + **opts + ) ds[:] = self.pk_props - ds.attrs['description']='[ ( s1, sI, srI, scI, id ), Npks ]' - ds = grp.require_dataset( name = 'npk', - shape = self.npk.shape, - chunks = self.guesschunk( self.npk ), - dtype = self.npk.dtype, **opts ) + ds.attrs["description"] = "[ ( s1, sI, srI, scI, id ), Npks ]" + ds = grp.require_dataset( + name="npk", + shape=self.npk.shape, + chunks=self.guesschunk(self.npk), + dtype=self.npk.dtype, + **opts + ) ds[:] = self.npk - ds.attrs['description']="[ nscans, (N_peaks_in_scan, N_pairs_ii, N_pairs_ij) ]" - if hasattr(self,'glabel') and self.glabel is not None: - ds = grp.require_dataset( name = 'glabel', - shape = self.glabel.shape, - chunks = self.guesschunk( self.glabel ), - dtype = self.glabel.dtype, **opts ) + ds.attrs[ + "description" + ] = "[ nscans, (N_peaks_in_scan, N_pairs_ii, N_pairs_ij) ]" + if hasattr(self, "glabel") and self.glabel is not None: + ds = grp.require_dataset( + name="glabel", + shape=self.glabel.shape, + chunks=self.guesschunk(self.glabel), + dtype=self.glabel.dtype, + **opts + ) ds[:] = self.glabel - grp.attrs['nlabel'] = self.nlabel - if rc and hasattr(self, 'rc') and self.rc is not None: - ds = grp.require_dataset( name = 'rc', - shape = self.rc.shape, - chunks = self.guesschunk( 
self.rc ), - dtype = self.rc.dtype, **opts ) + grp.attrs["nlabel"] = self.nlabel + if rc and hasattr(self, "rc") and self.rc is not None: + ds = grp.require_dataset( + name="rc", + shape=self.rc.shape, + chunks=self.guesschunk(self.rc), + dtype=self.rc.dtype, + **opts + ) ds[:] = self.rc - ds.attrs['description'] = "row/col array for sparse connected pixels COO matrix" - + ds.attrs[ + "description" + ] = "row/col array for sparse connected pixels COO matrix" - @classmethod def fromSHM(cls, dct): - names = 'ipk', 'rpk', 'pk_props', 'rc', 'glabel' - sharrays = { name : shared_numpy_array( **dct[name] ) - for name in names if name in dct } - arrays = { name : sharrays[name].array - for name in names if name in dct } - o = cls( **arrays ) + names = "ipk", "rpk", "pk_props", "rc", "glabel" + sharrays = { + name: shared_numpy_array(**dct[name]) for name in names if name in dct + } + arrays = {name: sharrays[name].array for name in names if name in dct} + o = cls(**arrays) o.shared = sharrays return o - + @classmethod - def load(cls, h5name, h5group='pks2d'): - with h5py.File( h5name, 'r' ) as hin: - grp = hin[ h5group ] - ipk = grp[ 'ipk' ][:] - pk_props = grp[ 'pk_props' ][:] - if 'glabel' in grp: - glabel = grp['glabel'][:] - nlabel = grp.attrs['nlabel'] + def load(cls, h5name, h5group="pks2d"): + with h5py.File(h5name, "r") as hin: + grp = hin[h5group] + ipk = grp["ipk"][:] + pk_props = grp["pk_props"][:] + if "glabel" in grp: + glabel = grp["glabel"][:] + nlabel = grp.attrs["nlabel"] else: glabel = None nlabel = 0 # rc? - npk = grp['npk'][:] - nlabel = grp.attrs['nlabel'] - obj = cls( ipk=ipk, pk_props=pk_props, glabel=glabel, nlabel=nlabel ) - obj.npk = npk # this is ugly. Sending as arg causes allocate. - return obj - + npk = grp["npk"][:] + nlabel = grp.attrs["nlabel"] + obj = cls(ipk=ipk, pk_props=pk_props, glabel=glabel, nlabel=nlabel) + obj.npk = npk # this is ugly. Sending as arg causes allocate. 
+ return obj + def find_uniq(self, outputfile=None, use_scipy=False): - """ find the unique labels from the rc array """ + """find the unique labels from the rc array""" t = tictoc() n = self.ipk[-1] # print("Row/col sparse array") # for i in range(3): # print(self.rc[i].dtype,self.rc[i].shape)i if outputfile is not None: - with h5py.File(outputfile,"w") as hout: - hout['data']=self.rc[2] - hout['i'] = self.rc[0] - hout['j'] = self.rc[1] + with h5py.File(outputfile, "w") as hout: + hout["data"] = self.rc[2] + hout["i"] = self.rc[0] + hout["j"] = self.rc[1] return None, None if use_scipy: - coo = scipy.sparse.coo_matrix( (self.rc[2], - (self.rc[0], self.rc[1])), - shape=(n,n)) - t('coo') - cc = scipy.sparse.csgraph.connected_components( coo, directed=False, return_labels=True ) - t('find connected components') + coo = scipy.sparse.coo_matrix( + (self.rc[2], (self.rc[0], self.rc[1])), shape=(n, n) + ) + t("coo") + cc = scipy.sparse.csgraph.connected_components( + coo, directed=False, return_labels=True + ) + t("find connected components") else: - cc = find_ND_labels( self.rc[0], self.rc[1], n ) + cc = find_ND_labels(self.rc[0], self.rc[1], n) self.cc = cc self.nlabel, self.glabel = cc return cc - + def pk2dmerge(self, omega, dty): """ creates a dictionary of the 3D peaks """ assert omega.shape == dty.shape assert omega.size > self.pk_props[4].max() - - out = np.zeros( (7, self.nlabel) , float) - n = numbapkmerge( self.glabel, self.pk_props, omega, dty, out) + + out = np.zeros((7, self.nlabel), float) + _ = numbapkmerge(self.glabel, self.pk_props, omega, dty, out) allpks = { - 's_raw' : out[2]/out[1], - 'f_raw' : out[3]/out[1], - 'omega' : out[4]/out[1], - 'Number_of_pixels' : out[0], - 'sum_intensity' : out[1], - 'dty' : out[5]/out[1], - 'spot3d_id' : np.arange(len(out[0])), # points back to labels in pk2d - 'npk2d': out[6], + "s_raw": out[2] / out[1], + "f_raw": out[3] / out[1], + "omega": out[4] / out[1], + "Number_of_pixels": out[0], + "sum_intensity": out[1], + "dty": out[5] / out[1], + "spot3d_id": np.arange(len(out[0])), # points back to labels in pk2d + "npk2d": out[6], } return allpks - + def pk2d(self, omega, dty): s1, sI, srI, scI, frm = self.pk_props allpks = { - 's_raw' : srI / sI, - 'f_raw' : scI / sI, - 'omega' : omega.flat[ frm ], - 'dty' : dty.flat[ frm ], - 'Number_of_pixels' : s1, - 'sum_intensity' : sI, - 'spot3d_id' : self.glabel, + "s_raw": srI / sI, + "f_raw": scI / sI, + "omega": omega.flat[frm], + "dty": dty.flat[frm], + "Number_of_pixels": s1, + "sum_intensity": sI, + "spot3d_id": self.glabel, } return allpks - + + if 0: - def load_and_transpose( hname, itype, vtype ): - """ Read in a coo file saved by pks_table.find_uniq - """ - with h5py.File(hname,'r') as hin: - di = hin['i'] - ii = np.empty( len(di)*2, itype ) - jj = np.empty( len(di)*2, itype ) - vv = np.empty( len(di)*2, vtype ) - ii[:len(di)] = hin['i'][:] # this should cast when filling - ii[len(di):] = hin['j'][:] - jj[:len(di)] = hin['j'][:] - jj[len(di):] = hin['i'][:] - vv[:len(di)] = hin['data'][:] - vv[len(di):] = hin['data'][:] - return ii,jj,vv - + + def load_and_transpose(hname, itype, vtype): + """Read in a coo file saved by pks_table.find_uniq""" + with h5py.File(hname, "r") as hin: + di = hin["i"] + ii = np.empty(len(di) * 2, itype) + jj = np.empty(len(di) * 2, itype) + vv = np.empty(len(di) * 2, vtype) + ii[: len(di)] = hin["i"][:] # this should cast when filling + ii[len(di) :] = hin["j"][:] + jj[: len(di)] = hin["j"][:] + jj[len(di) :] = hin["i"][:] + vv[: len(di)] = hin["data"][:] + vv[len(di) 
:] = hin["data"][:] + return ii, jj, vv + @numba.njit -def numbapkmerge( labels, pks, omega, dty, out): +def numbapkmerge(labels, pks, omega, dty, out): for k in range(len(labels)): - frm = pks[ 4, k ] - o = omega.flat[ frm ] - y = dty.flat[ frm ] - l = labels[ k ] - out[0,l] += pks[0,k] # s1 == number of pixels in a peak - out[1,l] += pks[1,k] # sI == sum of the intensity - out[2,l] += pks[2,k] # srI === sum of intensity * row - out[3,l] += pks[3,k] # scI === sum of intensity * column - out[4,l] += o * pks[1,k] - out[5,l] += y * pks[1,k] - out[6,l] += 1 # s0 == number of 2D peaks + frm = pks[4, k] + o = omega.flat[frm] + y = dty.flat[frm] + l = labels[k] + out[0, l] += pks[0, k] # s1 == number of pixels in a peak + out[1, l] += pks[1, k] # sI == sum of the intensity + out[2, l] += pks[2, k] # srI === sum of intensity * row + out[3, l] += pks[3, k] # scI === sum of intensity * column + out[4, l] += o * pks[1, k] + out[5, l] += y * pks[1, k] + out[6, l] += 1 # s0 == number of 2D peaks return k - - - + @numba.njit(parallel=True) -def numbalabelNd( i, j, pkid, flip = 0 ): +def numbalabelNd(i, j, pkid, flip=0): """ i, j are the pairs of overlapping peaks pkid are the current labels of the peaks - + This scans all pairs and for each pair it makes pkid[i] = pkid[j] = min(pkid[i], pkid[j]) - + Run this enough times and eventually all peaks point back to the first one they overlap. - + Using parallel seems dodgy to me. Might be a race condition, but it seems like the code is legal so long as only these addresses are touched? """ # scanning in forwards direction nbad = 0 - N = len(i) - 1 + N = len(i) - 1 for k in numba.prange(len(i)): - p = k + flip * ( N - 2 * k ) + p = k + flip * (N - 2 * k) # numba was fussy, no idea why, gets confused # when indexing using an if statement. # so made it like this: # flip == 0 -> k # flip == 1 -> k + flip * ( N - 2 * k ) # -> N - k - # Changing array iteration direction seemed to + # Changing array iteration direction seemed to # speed up convergence. DFS or BFS is likely # better. But stacks can overflow. pi = pkid[i[p]] @@ -583,77 +632,87 @@ def numbalabelNd( i, j, pkid, flip = 0 ): pkid[i[p]] = m pkid[j[p]] = m nbad += 1 - return nbad - - + return nbad + + @numba.njit(parallel=True) -def get_clean_labels( l ): +def get_clean_labels(l): """ Given the labelling in l, put clean labels in the place. - + l = integer array with value = label of lowest 2d peak in this ND group. """ n = 0 - assert l[0] == 0 , 'first label should be zero' - for i in range(len(l)): # count the labels. NOT PARALLEL! + assert l[0] == 0, "first label should be zero" + for i in range(len(l)): # count the labels. NOT PARALLEL! if l[i] == i: l[i] = n n += 1 else: - l[i] = -l[i] # for inplace, tag the ones to be redone + l[i] = -l[i] # for inplace, tag the ones to be redone for i in numba.prange(len(l)): j = l[i] - if j < 0: # zeros and pointers to zero should not change. + if j < 0: # zeros and pointers to zero should not change. 
l[i] = l[-j] return n - - -def find_ND_labels( i, j, npks, verbose=1 ): + + +def find_ND_labels(i, j, npks, verbose=1): start = time.time() - labels = np.arange( npks, dtype=int ) - labels_final = np.zeros_like( labels ) # memory error early please + labels = np.arange(npks, dtype=int) + # labels_final = np.zeros_like( labels ) # memory error early please flip = 0 - b = numbalabelNd( i, j, labels, flip=flip ) + b = numbalabelNd(i, j, labels, flip=flip) while 1: - if verbose>1: + if verbose > 1: dt = time.time() - start - print("%.1f %.6f"%(dt, b/1e6 )) + print("%.1f %.6f" % (dt, b / 1e6)) if verbose == 1: - print('.', end='') + print(".", end="") if b == 0: break - flip = (1,0)[flip] # exchange 1 and 0 - b = numbalabelNd( i, j, labels, flip=flip ) - n = get_clean_labels( labels ) + flip = (1, 0)[flip] # exchange 1 and 0 + b = numbalabelNd(i, j, labels, flip=flip) + n = get_clean_labels(labels) return n, labels - -def pks_table_from_scan( sparsefilename, ds, row ): +def pks_table_from_scan(sparsefilename, ds, row): """ Labels one rotation scan to a peaks table - + sparsefilename = sparse pixels file dataset = ImageD11.sinograms.dataset row = index for dataset.scan[ row ] - + returns a pks_table. You might want to call one of "save" or "pk2d" or "pk2dmerge" on the result - - This is probably not threadsafe - """ - sps = ImageD11.sparseframe.SparseScan( sparsefilename, ds.scans[row] ) - sps.motors['omega'] = ds.omega[row] - peaks, pairs = ImageD11.sinograms.properties.props( sps, row ) + + This is probably not threadsafe + """ + sps = ImageD11.sparseframe.SparseScan(sparsefilename, ds.scans[row]) + sps.motors["omega"] = ds.omega[row] + peaks, pairs = ImageD11.sinograms.properties.props(sps, row) # which frame/peak is which in the peaks array - # For the 3D merging - n1 = sum( pairs[k][0] for k in pairs ) # how many overlaps were found: - npk = np.array( [ ( peaks.shape[1], n1, 0), ] ) - pkst = ImageD11.sinograms.properties.pks_table( npk = npk, use_shm=False ) + # For the 3D merging + n1 = sum(pairs[k][0] for k in pairs) # how many overlaps were found: + npk = np.array( + [ + (peaks.shape[1], n1, 0), + ] + ) + pkst = ImageD11.sinograms.properties.pks_table(npk=npk, use_shm=False) pkst.pk_props = peaks - rc = pkst.rc + rc = pkst.rc s = 0 - pkid = np.concatenate(([0,], np.cumsum(sps.nlabels))) + pkid = np.concatenate( + ( + [ + 0, + ], + np.cumsum(sps.nlabels), + ) + ) for (row1, frame1, row2, frame2), (npairs, ijn) in pairs.items(): # add entries into a sparse matrix # key: (500, 2, 501, 2893) @@ -661,17 +720,17 @@ def pks_table_from_scan( sparsefilename, ds, row ): # [ 1, 2, 2], ... if npairs == 0: continue - #assert (row1 == i) and (row2 == i), (row1,row2,i) + # assert (row1 == i) and (row2 == i), (row1,row2,i) e = s + npairs assert e <= rc.shape[1] - #rc[ 0, s : e ] = ip[row1] + pkid[row1][frame1] + ijn[:,0] - 1 # col - #rc[ 1, s : e ] = ip[row2] + pkid[row2][frame2] + ijn[:,1] - 1 # row - rc[ 0, s : e ] = pkid[frame1] + ijn[:,0] - 1 # col - rc[ 1, s : e ] = pkid[frame2] + ijn[:,1] - 1 # row - rc[ 2, s : e ] = ijn[:,2] # num pixels + # rc[ 0, s : e ] = ip[row1] + pkid[row1][frame1] + ijn[:,0] - 1 # col + # rc[ 1, s : e ] = ip[row2] + pkid[row2][frame2] + ijn[:,1] - 1 # row + rc[0, s:e] = pkid[frame1] + ijn[:, 0] - 1 # col + rc[1, s:e] = pkid[frame2] + ijn[:, 1] - 1 # row + rc[2, s:e] = ijn[:, 2] # num pixels s = e - uni = pkst.find_uniq() - + _ = pkst.find_uniq() + """ if 0: # future TODO : scoring overlaps better. 
ks = list( pairs.keys() ) k = ks[100] @@ -683,67 +742,74 @@ def pks_table_from_scan( sparsefilename, ds, row ): n1 = npx1[ijo[:,0]-1] # number of pixels in pk1 (overlapping) n2 = npx2[ijo[:,1]-1] # number of pixels in pk2 (overlapping) no = ijo[:,2] - + """ + # pk3d = pkst.pk2dmerge(ds.omega, ds.dty) - return pkst - - - + return pkst + + def process(qin, qshm, qout, hname, scans, options): remove_shm_from_resource_tracker() start, end = qin.get() n2 = 0 # allocate lists to hold results nrows = end - start + 1 - pii = {} + pii = {} pij = {} mypks = {} pkid = {} - prev = None # suppress flake8 idiocy + prev = None # suppress flake8 idiocy # This is the 1D scan within the same row - for i in range(start, end+1): - scan = ImageD11.sparseframe.SparseScan( hname, scans[i] ) + for i in range(start, end + 1): + scan = ImageD11.sparseframe.SparseScan(hname, scans[i]) global omega - scan.motors['omega'] = omega[i] - mypks[i], pii[i] = props(scan, i, algorithm = options['algorithm'], - wtmax = options['wtmax'] - ) -# nlabels[i] = scan.nlabels # peaks per frame information - pkid[i] = np.concatenate(([0,], np.cumsum(scan.nlabels))) - n1 = sum( pii[i][k][0] for k in pii[i] ) + scan.motors["omega"] = omega[i] + mypks[i], pii[i] = props( + scan, i, algorithm=options["algorithm"], wtmax=options["wtmax"] + ) + # nlabels[i] = scan.nlabels # peaks per frame information + pkid[i] = np.concatenate( + ( + [ + 0, + ], + np.cumsum(scan.nlabels), + ) + ) + n1 = sum(pii[i][k][0] for k in pii[i]) if i > start: - pij[i] = pairscans( scan, prev ) - n2 = sum( pij[i][k][0] for k in pij[i] ) + pij[i] = pairscans(scan, prev) + n2 = sum(pij[i][k][0] for k in pij[i]) # number of pair overlaps required for the big matrix - qout.put( (i, len(mypks[i][0]), n1, n2) ) + qout.put((i, len(mypks[i][0]), n1, n2)) prev = scan # Now we are waiting for the shared memory to save the results shm = qshm.get() - pkst = pks_table.fromSHM( shm ) + pkst = pks_table.fromSHM(shm) ip = pkst.ipk rc = pkst.rc # For each row, save our local results - for i in range(start, end+1): + for i in range(start, end + 1): if (i == start) and (i > 0): # will be done by the previous worker continue # now copy our results to shared memory. 
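The slice bounds used for that copy come from the zero-led cumulative sum built in `pks_table.create`: scan row `i` owns columns `ipk[i]:ipk[i+1]` of the shared `pk_props`. A sketch with made-up counts:

```python
import numpy as np

npk = np.array([[3, 0, 0], [5, 0, 0], [2, 0, 0]])  # peaks per scan row
ipk = np.empty(len(npk) + 1, int)
ipk[0] = 0
ipk[1:] = np.cumsum(npk[:, 0])               # -> [0, 3, 8, 10]
pk_props = np.zeros((5, ipk[-1]), np.int64)  # shared memory in the real code
mypks_row1 = np.ones((5, 5), np.int64)       # pretend results for scan row 1
pk_props[:, ipk[1] : ipk[2]] = mypks_row1    # the copy done just below
```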
First the peaks: - pkst.pk_props[:, ip[i]: ip[i+1] ] = mypks[i] + pkst.pk_props[:, ip[i] : ip[i + 1]] = mypks[i] # # find the unique starting id for each frame: # Where to store these in the shared memory s = ipstart = pkst.rpk[i] - ipend = pkst.rpk[i+1] + ipend = pkst.rpk[i + 1] # - if 0: # debugging - n1 = sum( pii[i][k][0] for k in pii[i] ) + if 0: # debugging + n1 = sum(pii[i][k][0] for k in pii[i]) if i > start: - n2 = sum( pij[i][k][0] for k in pij[i] ) + n2 = sum(pij[i][k][0] for k in pij[i]) else: n2 = 0 # # will be done by the previous worker # print('Debugging:',i, ipstart, ipend, n1, n2, ipend-ipstart, n1+n2) - assert (ipend - ipstart)==(n1 + n2) + assert (ipend - ipstart) == (n1 + n2) # for (row1, frame1, row2, frame2), (npairs, ijn) in pii[i].items(): # add entries into a sparse matrix @@ -755,87 +821,92 @@ def process(qin, qshm, qout, hname, scans, options): assert (row1 == i) and (row2 == i) e = s + npairs assert e <= ipend - rc[ 0, s : e ] = ip[row1] + pkid[row1][frame1] + ijn[:,0] - 1 # col - rc[ 1, s : e ] = ip[row2] + pkid[row2][frame2] + ijn[:,1] - 1 # row - rc[ 2, s : e ] = ijn[:,2] # num pixels + rc[0, s:e] = ip[row1] + pkid[row1][frame1] + ijn[:, 0] - 1 # col + rc[1, s:e] = ip[row2] + pkid[row2][frame2] + ijn[:, 1] - 1 # row + rc[2, s:e] = ijn[:, 2] # num pixels s = e if i == 0: - continue # no pairs to previous row exit + continue # no pairs to previous row exit for (row1, frame1, row2, frame2), (npairs, ijn) in pij[i].items(): if npairs == 0: continue - assert (row1 == i) and (row2 == i-1) + assert (row1 == i) and (row2 == i - 1) e = s + npairs assert e <= ipend - rc[ 0, s : e ] = ip[row1] + pkid[row1][frame1] + ijn[:,0] - 1 # col - rc[ 1, s : e ] = ip[row2] + pkid[row2][frame2] + ijn[:,1] - 1 # row - rc[ 2, s : e ] = ijn[:,2] # num pixels + rc[0, s:e] = ip[row1] + pkid[row1][frame1] + ijn[:, 0] - 1 # col + rc[1, s:e] = ip[row2] + pkid[row2][frame2] + ijn[:, 1] - 1 # row + rc[2, s:e] = ijn[:, 2] # num pixels s = e qout.put(start) - - + + def goforit(ds, sparsename, options): qin = mp.Queue(maxsize=NPROC) qshm = mp.Queue(maxsize=NPROC) qresult = mp.Queue(maxsize=len(ds.scans)) out = [] - with mp.Pool(NPROC, initializer=process, - initargs=(qin, qshm, qresult, sparsename, ds.scans, options)) as pool: - slices = get_start_end( len(ds.scans), NPROC ) - for i,(s,e) in enumerate(slices): - qin.put((s,e)) - waiting = countse( slices ) + with mp.Pool( + NPROC, + initializer=process, + initargs=(qin, qshm, qresult, sparsename, ds.scans, options), + ) as pool: + slices = get_start_end(len(ds.scans), NPROC) + for i, (s, e) in enumerate(slices): + qin.put((s, e)) + waiting = countse(slices) for i in tqdm.tqdm(range(waiting)): ans = qresult.get() out.append(ans) - ks, P = compute_storage( out ) + ks, P = compute_storage(out) assert len(ks) == len(ds.scans) - mem = pks_table( P ) + mem = pks_table(P) shm = mem.export() # workers fill the shared memory for i in range(NPROC): - qshm.put( shm ) + qshm.put(shm) dones = set() for i in tqdm.tqdm(range(NPROC)): # check done f = qresult.get() - dones.add( f ) + dones.add(f) if len(dones) == NPROC: break return out, ks, P, mem -default_options = { 'algorithm' : 'lmlabel', - 'wtmax' : None, # value to replace saturated pixels - 'save_overlaps': False, - } - -def main( dsname, sparsename, pkname, options = default_options ): +default_options = { + "algorithm": "lmlabel", + "wtmax": None, # value to replace saturated pixels + "save_overlaps": False, +} + + +def main(dsname, sparsename, pkname, options=default_options): if 
os.path.exists(pkname): - logging.warning("Your output file already exists. May fail. %s"%(pkname)) + logging.warning("Your output file already exists. May fail. %s" % (pkname)) global NPROC, omega try: rmem = None t = tictoc() ds = ImageD11.sinograms.dataset.load(dsname) omega = ds.omega - t('read ds %s'%(dsname)) + t("read ds %s" % (dsname)) nscans = len(ds.scans) - if NPROC > nscans-1: - NPROC = max(1,nscans-1) - print('Nscans',nscans,'NPROC', NPROC) - print('Options',options) + if NPROC > nscans - 1: + NPROC = max(1, nscans - 1) + print("Nscans", nscans, "NPROC", NPROC) + print("Options", options) peaks, ks, P, rmem = goforit(ds, sparsename, options) - t('%d label and pair'%(len(rmem.pk_props[0]))) - if 'save_overlaps' in options and options['save_overlaps']: - rmem.save( pkname + '_mat.h5' , rc=True) - t('cache') + t("%d label and pair" % (len(rmem.pk_props[0]))) + if "save_overlaps" in options and options["save_overlaps"]: + rmem.save(pkname + "_mat.h5", rc=True) + t("cache") cc = rmem.find_uniq() - t('%s connected components'%(str(cc[0]))) - rmem.save( pkname ) - t('write hdf5') + t("%s connected components" % (str(cc[0]))) + rmem.save(pkname) + t("write hdf5") except Exception as e: - print('Unhandled exception:',e) + print("Unhandled exception:", e) raise finally: if rmem is not None: @@ -844,20 +915,19 @@ def main( dsname, sparsename, pkname, options = default_options ): return - -if __name__=="__main__": +if __name__ == "__main__": dsname = sys.argv[1] sparsename = sys.argv[2] pkname = sys.argv[3] - algorithm = 'lmlabel' + algorithm = "lmlabel" options = default_options - if len(sys.argv)>=5: - options['algorithm'] = sys.argv[4] + if len(sys.argv) >= 5: + options["algorithm"] = sys.argv[4] - main( dsname, sparsename, pkname, options ) + main(dsname, sparsename, pkname, options) print("Your stuff left in shm:") - os.system("ls -l /dev/shm | grep %s"%(os.environ['USER'])) + os.system("ls -l /dev/shm | grep %s" % (os.environ["USER"])) print("... all done") diff --git a/ImageD11/sinograms/roi_iradon.py b/ImageD11/sinograms/roi_iradon.py index 27a37e79..17cc6b20 100644 --- a/ImageD11/sinograms/roi_iradon.py +++ b/ImageD11/sinograms/roi_iradon.py @@ -1,4 +1,3 @@ - """This code came from skimage.transform and then was modified - Added mask ROI for back projection. Optimisation for small grains @@ -43,9 +42,9 @@ from scipy.fft import fft, ifft, fftfreq, fftshift from scipy.interpolate import interp1d import numpy as np -import concurrent.futures import skimage.transform.radon_transform + def _sinogram_pad(n, o=None): if o is None: diagonal = int(np.ceil(np.sqrt(2) * n)) @@ -58,11 +57,15 @@ def _sinogram_pad(n, o=None): pad_width = ((pad_before, pad - pad_before), (0, 0)) return pad_width + def _get_fourier_filter(size, filter_name): - """Construct the Fourier filter. - """ - n = np.concatenate((np.arange(1, size / 2 + 1, 2, dtype=int), - np.arange(size / 2 - 1, 0, -2, dtype=int))) + """Construct the Fourier filter.""" + n = np.concatenate( + ( + np.arange(1, size / 2 + 1, 2, dtype=int), + np.arange(size / 2 - 1, 0, -2, dtype=int), + ) + ) f = np.zeros(size) f[0] = 0.25 f[1::2] = -1 / (np.pi * n) ** 2 @@ -70,7 +73,7 @@ def _get_fourier_filter(size, filter_name): # Computing the ramp filter from the fourier transform of its # frequency domain representation lessens artifacts and removes a # small bias as explained in [1], Chap 3. 
Equation 61 - fourier_filter = 2 * np.real(fft(f)) # ramp filter + fourier_filter = 2 * np.real(fft(f)) # ramp filter if filter_name == "ramp": pass elif filter_name == "shepp-logan": @@ -91,16 +94,18 @@ def _get_fourier_filter(size, filter_name): # skimage.transform.iradon -def iradon(radon_image, theta, - output_size=None, - filter_name="ramp", - interpolation="linear", - projection_shifts=None, - mask = None, - workers = 1, - ): +def iradon( + radon_image, + theta, + output_size=None, + filter_name="ramp", + interpolation="linear", + projection_shifts=None, + mask=None, + workers=1, +): """Inverse radon transform. From skimage.transform. Simplified then ruined. - + - allow projection offset/shifts to be used 1D = constant offset for this projection (why?) 2D = offset versus dty for this projection @@ -109,36 +114,37 @@ def iradon(radon_image, theta, """ angles_count = len(theta) if angles_count != radon_image.shape[1]: - raise ValueError("The given ``theta`` does not match the number of " - "projections in ``radon_image``.") + raise ValueError( + "The given ``theta`` does not match the number of " + "projections in ``radon_image``." + ) if output_size is None: output_size = radon_image.shape[0] to_pad = _sinogram_pad(radon_image.shape[0], output_size) if projection_shifts is not None: assert projection_shifts.shape == radon_image.shape - projection_shifts = np.pad( projection_shifts, to_pad, - mode='constant', constant_values=0 ) - radon_image = np.pad( radon_image, to_pad, - mode='constant', constant_values=0 ) + projection_shifts = np.pad( + projection_shifts, to_pad, mode="constant", constant_values=0 + ) + radon_image = np.pad(radon_image, to_pad, mode="constant", constant_values=0) img_shape = radon_image.shape[0] - + # Resize image to next power of two (but no less than 64) for # Fourier analysis; speeds up Fourier and lessens artifacts projection_size_padded = max(64, int(2 ** np.ceil(np.log2(2 * img_shape)))) pad_width = ((0, projection_size_padded - img_shape), (0, 0)) - img = np.pad(radon_image, pad_width, mode='constant', constant_values=0) - #return img + img = np.pad(radon_image, pad_width, mode="constant", constant_values=0) + # return img # Apply filter in Fourier domain fourier_filter = _get_fourier_filter(projection_size_padded, filter_name) projection = fft(img, axis=0, workers=workers) * fourier_filter radon_filtered = np.real(ifft(projection, axis=0, workers=workers)[:img_shape, :]) # Reconstruct image by interpolation - reconstructed = np.zeros((output_size, output_size), - dtype=radon_image.dtype) + reconstructed = np.zeros((output_size, output_size), dtype=radon_image.dtype) radius = output_size // 2 xpr, ypr = np.mgrid[:output_size, :output_size] - radius - + # TODO: make this part threaded - one thread per tile if mask is not None: xpr = xpr[mask] @@ -148,20 +154,23 @@ def iradon(radon_image, theta, recm = reconstructed # print('img_shape.shape',img_shape) x = np.arange(img_shape) - img_shape // 2 - rtheta = np.deg2rad( theta ) + rtheta = np.deg2rad(theta) - for i in range(angles_count): # most of the time is in this loop + for i in range(angles_count): # most of the time is in this loop t = ypr * np.cos(rtheta[i]) - xpr * np.sin(rtheta[i]) if projection_shifts is not None: - xi = x + projection_shifts.T[i] # measured positions are shifted + xi = x + projection_shifts.T[i] # measured positions are shifted else: xi = x - interpolant = interp1d(xi, radon_filtered[:,i], - kind=interpolation, - copy=False, - assume_sorted=True, - bounds_error=False, - fill_value=0) 
+ interpolant = interp1d( + xi, + radon_filtered[:, i], + kind=interpolation, + copy=False, + assume_sorted=True, + bounds_error=False, + fill_value=0, + ) recm += interpolant(t) recm *= np.pi / (2 * angles_count) if mask is not None: @@ -169,28 +178,31 @@ def iradon(radon_image, theta, return reconstructed -#TODO : fixme +# TODO : fixme # It would be 'nice' for the radon transform to be closer to iradon # in using the same xpr/ypr/shifts formula. That will give pixel co-ords -# for the 1D projections. The problem is to then 'cut up' the pixels on +# for the 1D projections. The problem is to then 'cut up' the pixels on # a 1D projection (so trapezium integrations). Doing bilinear interpolation # gave nasty artifacts at 45 degrees angle. This needs a bit of thought. # -# 4 corners. +# 4 corners. # Sort into order(?) or just abs(sin), abs(cos)? # First triangle. Middle Part. Last triangle. # -def radon(image, theta, - output_size=None, # sinogram width - projection_shifts=None, - mask = None, - workers = 1, - ): + +def radon( + image, + theta, + output_size=None, # sinogram width + projection_shifts=None, + mask=None, + workers=1, +): """ From skimage.transform. Modified to have projection shifts and roi to match the masked iradon here - + Calculates the radon transform of an image given specified projection angles. @@ -225,53 +237,52 @@ def radon(image, theta, """ if image.ndim != 2: - raise ValueError('The input image must be 2-D') + raise ValueError("The input image must be 2-D") assert image.dtype == np.float32 assert len(image.shape) == 2 assert image.shape[0] == image.shape[1] - + if output_size is None: output_size = image.shape[0] - + if projection_shifts is not None: - assert projection_shifts.shape[1] == len( theta ) + assert projection_shifts.shape[1] == len(theta) assert projection_shifts.shape[0] == output_size - - radius = output_size // 2 + + radius = output_size // 2 # padding the image. Shall we bother? Apparently yes. 
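The padding computed just below recentres the image inside a square of side output_size. A standalone check of that arithmetic, with made-up sizes:

import numpy as np

image = np.zeros((90, 90), np.float32)   # hypothetical input
output_size = 128
pad = [int(np.ceil(output_size - s)) for s in image.shape]
new_center = [(s + p) // 2 for s, p in zip(image.shape, pad)]
old_center = [s // 2 for s in image.shape]
pad_before = [nc - oc for oc, nc in zip(old_center, new_center)]
pad_width = [(pb, p - pb) for pb, p in zip(pad_before, pad)]
padded = np.pad(image, pad_width, mode="constant", constant_values=0)
assert padded.shape == (output_size, output_size)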
pad = [int(np.ceil(output_size - s)) for s in image.shape] new_center = [(s + p) // 2 for s, p in zip(image.shape, pad)] old_center = [s // 2 for s in image.shape] pad_before = [nc - oc for oc, nc in zip(old_center, new_center)] pad_width = [(pb, p - pb) for pb, p in zip(pad_before, pad)] - padded_image = np.pad(image, pad_width, mode='constant', - constant_values=0) + padded_image = np.pad(image, pad_width, mode="constant", constant_values=0) # padded_image is always square if padded_image.shape[0] != padded_image.shape[1]: - raise ValueError('padded_image must be a square') + raise ValueError("padded_image must be a square") center = padded_image.shape[0] // 2 - radon_image = np.zeros((padded_image.shape[0], len(theta)), - dtype=image.dtype) + radon_image = np.zeros((padded_image.shape[0], len(theta)), dtype=image.dtype) angles_count = len(theta) - rtheta = np.deg2rad( theta ) - - for i in range(angles_count): # most of the time is in this loop + rtheta = np.deg2rad(theta) + + for i in range(angles_count): # most of the time is in this loop if projection_shifts is not None: - dx = projection_shifts.T[i] # measured positions are shifted + dx = projection_shifts.T[i] # measured positions are shifted else: dx = None - rotated = skimage.transform.radon_transform.warp( padded_image, fxyrot, - map_args={'angle': rtheta[i], - 'center': center, - 'projection_shifts': dx }, - clip=False) + rotated = skimage.transform.radon_transform.warp( + padded_image, + fxyrot, + map_args={"angle": rtheta[i], "center": center, "projection_shifts": dx}, + clip=False, + ) radon_image[:, i] = rotated.sum(0) return radon_image -def fxyrot( colrow, angle=0, center=0, projection_shifts = None ): +def fxyrot(colrow, angle=0, center=0, projection_shifts=None): # apply the projection shifts in reverse # t = ypr * np.cos(rtheta[i]) - xpr * np.sin(rtheta[i]) # if projection_shifts is not None: @@ -280,15 +291,15 @@ def fxyrot( colrow, angle=0, center=0, projection_shifts = None ): # xi = x col, row = colrow.T - center n = int(np.sqrt(col.shape[0])) - assert n*n == col.shape[0] - col.shape = n,n - row.shape = n,n + assert n * n == col.shape[0] + col.shape = n, n + row.shape = n, n cos_a, sin_a = np.cos(angle), np.sin(angle) if projection_shifts is not None: ct = col.T ct += projection_shifts - x = cos_a*col + sin_a*row - y = -sin_a*col + cos_a*row - colrow[:,0] = x.ravel() - colrow[:,1] = y.ravel() + x = cos_a * col + sin_a * row + y = -sin_a * col + cos_a * row + colrow[:, 0] = x.ravel() + colrow[:, 1] = y.ravel() return colrow + center diff --git a/ImageD11/sinograms/sinogram2crysalis.py b/ImageD11/sinograms/sinogram2crysalis.py index 86b07eb0..deea3987 100644 --- a/ImageD11/sinograms/sinogram2crysalis.py +++ b/ImageD11/sinograms/sinogram2crysalis.py @@ -1,4 +1,3 @@ - """WARNING: work in in progress""" from __future__ import print_function @@ -11,216 +10,234 @@ import ImageD11.sinograms.lima_segmenter import fabio.app.eiger2crysalis, fabio.limaimage import bslz4_to_sparse -import tqdm import concurrent.futures import threading import hdf5plugin readlock = threading.Lock() -# Dummy object +# Dummy object class empty: pass - + + # Dummy fabio image for the data -class SinoScan( fabio.limaimage.LimaImage ): - """ Inherit limaimage to be allowed by converter """ +class SinoScan(fabio.limaimage.LimaImage): + """Inherit limaimage to be allowed by converter""" + def __init__(self, dataset, num=0): self.header = {} self.dataset = dataset self.data = self.dataset[num] self._nframes = len(self.dataset) self.h5 = empty() # fake 
this out - self.h5.attrs = {'default': None} - + self.h5.attrs = {"default": None} + def close(self): pass - + def __iter__(self): o = empty() o.header = {} for o.data in self.dataset: yield o - - + + def name_decorator(method): - def decorate_name(self = None): + def decorate_name(self=None): headers = method(self) print("Patching header") headers["drealpixelsizex"] = 0.075 headers["drealpixelsizey"] = 0.075 - headers["dexposuretimeinsec"] = 0.1 # fixme + headers["dexposuretimeinsec"] = 0.1 # fixme return headers + return decorate_name -if not hasattr( fabio.app.eiger2crysalis.Converter, 'patched' ): - fabio.app.eiger2crysalis.Converter.common_headers = name_decorator(fabio.app.eiger2crysalis.Converter.common_headers) - + +if not hasattr(fabio.app.eiger2crysalis.Converter, "patched"): + fabio.app.eiger2crysalis.Converter.common_headers = name_decorator( + fabio.app.eiger2crysalis.Converter.common_headers + ) + # patch the converter headers fabio.app.eiger2crysalis.Converter.patched = True - + @numba.njit(nogil=True) -def accumulate( npixels, values, indices, dest, num ): +def accumulate(npixels, values, indices, dest, num): d0 = dest[0].size * num for i in range(npixels): - dest.flat[ d0 + indices[i] ] += values[i] + dest.flat[d0 + indices[i]] += values[i] + @numba.njit(parallel=True) -def padd( values, destination ): - for i in numba.prange( values.size ): +def padd(values, destination): + for i in numba.prange(values.size): destination.flat[i] += values.flat[i] - -def integrate_sino( ds, image_mask, sinomask=None, nomega = 10): + +def integrate_sino(ds, image_mask, sinomask=None, nomega=10): """ ds = ImageD11.sinograms.dataset that describes scandata sinomask = which frames to use nomega = number of omegasteps to combine """ - global readlock - + global readlock + if sinomask is not None: assert sinomask.shape == ds.shape else: - sinomask = np.ones( ds.shape, bool ) - + sinomask = np.ones(ds.shape, bool) + # inverse of omega step size - istep = (len(ds.obincens)-1)/(ds.obincens[-1]-ds.obincens[0]) + istep = (len(ds.obincens) - 1) / (ds.obincens[-1] - ds.obincens[0]) # which omega bin is this frame ? - obin = np.round( istep * (ds.omega - ds.obincens[0])).astype(int) + obin = np.round(istep * (ds.omega - ds.obincens[0])).astype(int) nout = ds.shape[1] // nomega - destination = np.zeros( (nout, 2162, 2068), np.uint32 ) + destination = np.zeros((nout, 2162, 2068), np.uint32) address = 0 npx = 0 dt = None - for num_frames, sourcefile in zip( ds.frames_per_file, ds.imagefiles ): - todo = np.nonzero( sinomask.flat[address : address + num_frames] )[0] + for num_frames, sourcefile in zip(ds.frames_per_file, ds.imagefiles): + todo = np.nonzero(sinomask.flat[address : address + num_frames])[0] if len(todo) > 0: with readlock: - with h5py.File( os.path.join(ds.datapath, sourcefile), 'r' ) as hin: - dset = hin[ ds.limapath ] + with h5py.File(os.path.join(ds.datapath, sourcefile), "r") as hin: + dset = hin[ds.limapath] dt = dset.dtype chunks = [] for t in todo: num = obin.flat[address + t] // nomega try: - chunks.append( (num, dset.id.read_direct_chunk((t,0,0))[1]) ) + chunks.append( + (num, dset.id.read_direct_chunk((t, 0, 0))[1]) + ) except: - print("FAIL",ds.datapath, sourcefile,t,dset) + print("FAIL", ds.datapath, sourcefile, t, dset) raise - print('.' 
,end='') + print(".", end="") dc = bslz4_to_sparse.chunk2sparse(image_mask, dt) - for t,(o,b) in zip(todo, chunks): - npx, (vals, inds ) = dc( b, 0 ) - accumulate( npx, vals, inds, destination, o ) - print(',', end='') + for t, (o, b) in zip(todo, chunks): + npx, (vals, inds) = dc(b, 0) + accumulate(npx, vals, inds, destination, o) + print(",", end="") address += num_frames - print('/',end='') + print("/", end="") return destination - -def makecmdline( - wvln = 12.3985 / 43.569, - xbeam = 1024, - ybeam = 1024, - omegastart = 90, - step = 0.1, - distance = 140, - savepath = '.', - run = 0, - ): - return "eiger2crysalis /dev/null -w %f --beam %.2f %.2f --omega=-%d+index*%f --distance=%.3f -o %s/esperanto/frame_%d_{index}.esperanto" %( - wvln, xbeam, ybeam, omegastart, step, distance, savepath, run) - - -def sum_sinogram( dsname, output_name, nomega = 10, nthreads = None ): + +def makecmdline( + wvln=12.3985 / 43.569, + xbeam=1024, + ybeam=1024, + omegastart=90, + step=0.1, + distance=140, + savepath=".", + run=0, +): + return ( + "eiger2crysalis /dev/null -w %f --beam %.2f %.2f --omega=-%d+index*%f --distance=%.3f -o %s/esperanto/frame_%d_{index}.esperanto" + % (wvln, xbeam, ybeam, omegastart, step, distance, savepath, run) + ) + + +def sum_sinogram(dsname, output_name, nomega=10, nthreads=None): if nthreads is None: - nthreads = min(max( len(os.sched_getaffinity( os.getpid() )) - 2, 1 ), 20) - ds = ImageD11.sinograms.dataset.load( dsname ) - with h5py.File(dsname ,'r') as hin: - image_mask = fabio.open(hin['lima_segmenter'].attrs['maskfile']).data + nthreads = min(max(len(os.sched_getaffinity(os.getpid())) - 2, 1), 20) + ds = ImageD11.sinograms.dataset.load(dsname) + with h5py.File(dsname, "r") as hin: + image_mask = fabio.open(hin["lima_segmenter"].attrs["maskfile"]).data # ds.import_nnz() - print("Going to sum sinogram of", dsname, 'output to', output_name) - with concurrent.futures.ThreadPoolExecutor( max_workers=nthreads ) as pool: + print("Going to sum sinogram of", dsname, "output to", output_name) + with concurrent.futures.ThreadPoolExecutor(max_workers=nthreads) as pool: args = [] for i in range(nthreads): - rows = np.zeros( ds.shape, bool ) + rows = np.zeros(ds.shape, bool) rows[i::nthreads] = True - args.append( (ds, image_mask, rows, nomega) ) - + args.append((ds, image_mask, rows, nomega)) + def fun(args): - localds, image_mask, sinomask, nomega = args - return integrate_sino( localds, image_mask, sinomask=sinomask, nomega=nomega ) - + localds, image_mask, sinomask, nomega = args + return integrate_sino(localds, image_mask, sinomask=sinomask, nomega=nomega) + spx = None for ans in pool.map(fun, args): if spx is None: spx = ans else: - padd( ans, spx ) + padd(ans, spx) # fill mask? 
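The accumulate() kernel above is a scatter-add into one omega plane of the output stack; numba.njit only makes it fast. In plain numpy the equivalent (with made-up sizes and values) is np.add.at, which, unlike a vectorised +=, handles repeated indices correctly:

import numpy as np

dest = np.zeros((3, 4, 4), np.uint32)    # (nout, ny, nx) stack of summed frames
values = np.array([5, 7, 9], np.uint32)  # sparse pixel values from one frame
indices = np.array([0, 5, 15])           # flat pixel indices within one frame
num = 1                                  # destination omega bin
np.add.at(dest[num].ravel(), indices, values)  # dest.flat[dest[0].size * num + i] += v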
- mval = pow(2,32)-1 - for i,frm in enumerate(spx): - spx[i] = np.where( image_mask, spx[i], mval ) - - with h5py.File( output_name, 'w' ) as hout: - ds = hout.create_dataset('data', - shape = spx.shape, - dtype = spx.dtype, - data = spx, - chunks = (1,spx.shape[1],spx.shape[2]), - **hdf5plugin.Bitshuffle(nelems=0, lz4=True) - ) + mval = pow(2, 32) - 1 + for i, frm in enumerate(spx): + spx[i] = np.where(image_mask, spx[i], mval) + + with h5py.File(output_name, "w") as hout: + ds = hout.create_dataset( + "data", + shape=spx.shape, + dtype=spx.dtype, + data=spx, + chunks=(1, spx.shape[1], spx.shape[2]), + **hdf5plugin.Bitshuffle(nelems=0, lz4=True) + ) return spx -def makecmdline( - wvln = 12.3985 / 43.569, - xbeam = 1024, - ybeam = 1024, - omegastart = 90, - step = 0.1, - distance = 140, - savepath = '.', - run = 1, - ): - print("cp /data/id11/nanoscope/Eiger/frame_1_.set %s/frame.set"%(savepath)) - return "eiger2crysalis sum_lpcmoX1_slice1.h5 -w %f --flip-lr --calc-mask False --beam %.2f %.2f --omega=-%d+index*%f --distance=%.3f -o %s/frame_%d_{index}.esperanto" %( - wvln, xbeam, ybeam, omegastart, step, distance, savepath, run) - -if __name__=="__main__": - sample = 'lpcmo_x3' - dset = 'z50_RT' - dsname = '../ds_{sample}_{dset}.h5'.format(**locals()) +def makecmdline( + wvln=12.3985 / 43.569, + xbeam=1024, + ybeam=1024, + omegastart=90, + step=0.1, + distance=140, + savepath=".", + run=1, +): + print("cp /data/id11/nanoscope/Eiger/frame_1_.set %s/frame.set" % (savepath)) + return ( + "eiger2crysalis sum_lpcmoX1_slice1.h5 -w %f --flip-lr --calc-mask False --beam %.2f %.2f --omega=-%d+index*%f --distance=%.3f -o %s/frame_%d_{index}.esperanto" + % (wvln, xbeam, ybeam, omegastart, step, distance, savepath, run) + ) + + +if __name__ == "__main__": + sample = "lpcmo_x3" + dset = "z50_RT" + dsname = "../ds_{sample}_{dset}.h5".format(**locals()) guessroot = os.getcwd() - if guessroot.startswith( '/data' ): - items = guessroot.split('/') - id11 = items.index('id11') - dataroot = "/".join( items[:id11+1] ) + "/RAW_DATA" + if guessroot.startswith("/data"): + items = guessroot.split("/") + id11 = items.index("id11") + dataroot = "/".join(items[: id11 + 1]) + "/RAW_DATA" print(dataroot) if not os.path.exists(dsname): - ds = ImageD11.sinograms.dataset.DataSet( - dataroot = '/data/visitor/hc5185/id11/20230505/RAW_DATA', - analysisroot = os.getcwd(), - sample = sample, - dset = dset, ) + ds = ImageD11.sinograms.dataset.DataSet( + dataroot="/data/visitor/hc5185/id11/20230505/RAW_DATA", + analysisroot=os.getcwd(), + sample=sample, + dset=dset, + ) ds.import_all() ds.save(dsname) ImageD11.sinograms.lima_segmenter.setup(dsname) - - spx = sum_sinogram( dsname, "sum_{sample}_{dset}.h5".format(**locals()), nthreads = 20) + + spx = sum_sinogram(dsname, "sum_{sample}_{dset}.h5".format(**locals()), nthreads=20) # Dummy file opener - def myopenimage( filename ): + def myopenimage(filename): global spx - '''flips to reverse order - need to fix this properly ... ''' - return SinoScan( spx[::-1] ) + """flips to reverse order - need to fix this properly ... 
""" + return SinoScan(spx[::-1]) # replace the file opener by our thing fabio.app.eiger2crysalis.fabio_open = myopenimage - cmd = makecmdline(savepath='lpcmoX1_slice1', step=0.5, ) + cmd = makecmdline( + savepath="lpcmoX1_slice1", + step=0.5, + ) sys.argv = cmd.split() - fabio.app.eiger2crysalis.main() \ No newline at end of file + fabio.app.eiger2crysalis.main() diff --git a/ImageD11/sparseframe.py b/ImageD11/sparseframe.py index bb4e4882..a22965b9 100644 --- a/ImageD11/sparseframe.py +++ b/ImageD11/sparseframe.py @@ -1,8 +1,6 @@ - from __future__ import print_function, division -import time, sys -import h5py, scipy.sparse, numpy as np #, pylab as pl +import h5py, scipy.sparse, numpy as np # , pylab as pl from ImageD11 import cImageD11 SAFE = True @@ -10,23 +8,23 @@ # see also sandbox/harvest_pixels.py NAMES = { - "filename" : "original filename used to create a sparse frame", - "intensity" : "corrected pixel values", + "filename": "original filename used to create a sparse frame", + "intensity": "corrected pixel values", "nlabel": "Number of unique labels for an image labelling", - "threshold" : "Cut off used for thresholding", - } + "threshold": "Cut off used for thresholding", +} -class sparse_frame( object ): +class sparse_frame(object): """ Indices / shape mapping This was developed for a single 2D frame See SparseScan below for something aiming towards many frames """ - def __init__(self, row, col, shape, itype=np.uint16, pixels=None, - SAFE=SAFE ): - """ row = slow direction + + def __init__(self, row, col, shape, itype=np.uint16, pixels=None, SAFE=SAFE): + """row = slow direction col = fast direction shape = size of full image itype = the integer type to store the indices @@ -36,10 +34,10 @@ def __init__(self, row, col, shape, itype=np.uint16, pixels=None, throw in a ary.attrs if you want to save some """ if SAFE: - self.check( row, col, shape, itype, SAFE ) + self.check(row, col, shape, itype, SAFE) self.shape = shape - self.row = np.asarray(row, dtype = itype ) - self.col = np.asarray(col, dtype = itype ) + self.row = np.asarray(row, dtype=itype) + self.col = np.asarray(col, dtype=itype) self.nnz = len(self.row) # Things we could have using those indices: # raw pixel intensities @@ -54,7 +52,11 @@ def __init__(self, row, col, shape, itype=np.uint16, pixels=None, self.pixels[name] = val def __repr__(self): - h = "Sparse Frame ( %d , %d ) nnz = %d, data: "%( self.shape[0], self.shape[1], self.nnz ) + h = "Sparse Frame ( %d , %d ) nnz = %d, data: " % ( + self.shape[0], + self.shape[1], + self.nnz, + ) h += " ".join(list(self.pixels.keys())) return h @@ -73,15 +75,14 @@ def __eq__(self, other): return False for k in self.pixels.keys(): if not (self.pixels[k] == other.pixels[k]).all(): - print("pixels mismatch",k) + print("pixels mismatch", k) print(self.pixels[k]) print(other.pixels[k]) return False return True - def check(self, row, col, shape, itype, SAFE=SAFE): - """ Ensure the index data makes sense and fits """ + """Ensure the index data makes sense and fits""" if SAFE: lo = np.iinfo(itype).min hi = np.iinfo(itype).max @@ -93,126 +94,136 @@ def check(self, row, col, shape, itype, SAFE=SAFE): assert len(row) == len(col) def is_sorted(self): - """ Tests whether the data are sorted into slow/fast order + """Tests whether the data are sorted into slow/fast order rows are slow direction - columns are fast """ + columns are fast""" # TODO: non uint16 cases - assert self.row.dtype == np.uint16 and \ - cImageD11.sparse_is_sorted( self.row, self.col ) == 0 + assert ( + self.row.dtype == 
np.uint16 + and cImageD11.sparse_is_sorted(self.row, self.col) == 0 + ) def to_dense(self, data=None, out=None): - """ returns the full 2D image + """returns the full 2D image data = name in self.pixels or 1D array matching self.nnz Does not handle repeated indices e.g. obj.to_dense( obj.pixels['raw_intensity'] ) """ if data in self.pixels: - data = self.pixels[data] # give back this array + data = self.pixels[data] # give back this array else: - ks = list( self.pixels.keys() ) - if len(ks)==1: - data = self.pixels[ks[0]] # default for only one + ks = list(self.pixels.keys()) + if len(ks) == 1: + data = self.pixels[ks[0]] # default for only one else: - data = np.ones( self.nnz, bool ) # give a mask + data = np.ones(self.nnz, bool) # give a mask if out is None: - out = np.zeros( self.shape, data.dtype ) + out = np.zeros(self.shape, data.dtype) else: assert out.shape == self.shape assert len(data) == self.nnz - scipy.sparse.coo_matrix((data, (self.row, self.col)), shape=(self.shape)).todense(out=out) + scipy.sparse.coo_matrix( + (data, (self.row, self.col)), shape=(self.shape) + ).todense(out=out) # does not handle duplicate indices if they were present: # adr = self.row.astype(np.intp) * self.shape[1] + self.col # out.flat[adr] = data return out - def mask( self, msk ): - """ returns a subset of itself """ - spf = sparse_frame( self.row[msk], - self.col[msk], - self.shape, self.row.dtype ) + def mask(self, msk): + """returns a subset of itself""" + spf = sparse_frame(self.row[msk], self.col[msk], self.shape, self.row.dtype) for name, px in self.pixels.items(): if name in self.meta: m = self.meta[name].copy() else: m = None - spf.set_pixels( name, px[msk], meta = m ) + spf.set_pixels(name, px[msk], meta=m) return spf - def set_pixels( self, name, values, meta=None ): - """ Named arrays sharing these labels """ - if SAFE: assert len(values) == self.nnz + def set_pixels(self, name, values, meta=None): + """Named arrays sharing these labels""" + if SAFE: + assert len(values) == self.nnz self.pixels[name] = values if meta is not None: self.meta[name] = meta + def sort_by(self, name): + """Not sure when you would do this. For sorting + by a peak labelling to get pixels per peak""" + order = np.argsort(self.pixels[name]) + self.reorder(self, order) - def sort_by( self, name ): - """ Not sure when you would do this. 
For sorting - by a peak labelling to get pixels per peak """ - order = np.argsort( self.pixels[name] ) - self.reorder( self, order ) + def sort(self): + """Puts you into slow / fast looping order""" + order = np.lexsort((self.col, self.row)) + self.reorder(self, order) - def sort( self ): - """ Puts you into slow / fast looping order """ - order = np.lexsort( ( self.col, self.row ) ) - self.reorder( self, order ) - - def reorder( self, order ): - """ Put the pixels into a different order (in place) """ - if SAFE: assert len(order) == self.nnz + def reorder(self, order): + """Put the pixels into a different order (in place)""" + if SAFE: + assert len(order) == self.nnz self.row[:] = self.row[order] self.col[:] = self.col[order] for name, px in self.pixels.items(): px[:] = px[order] - def threshold(self, threshold, name='intensity'): + def threshold(self, threshold, name="intensity"): """ returns a new sparse frame with pixels > threshold """ - return self.mask( self.pixels[name] > threshold ) + return self.mask(self.pixels[name] > threshold) - def to_hdf_group( frame, group ): - """ Save a 2D sparse frame to a hdf group + def to_hdf_group(frame, group): + """Save a 2D sparse frame to a hdf group Makes 1 single frame per group """ - itype = np.dtype( frame.row.dtype ) - meta = { "itype" : itype.name, - "shape0" : frame.shape[0], - "shape1" : frame.shape[1] } + itype = np.dtype(frame.row.dtype) + meta = {"itype": itype.name, "shape0": frame.shape[0], "shape1": frame.shape[1]} for name, value in meta.items(): group.attrs[name] = value - opts = { "compression": "lzf", - "shuffle" : True, - } - #opts = {} - group.require_dataset( "row", shape=(frame.nnz,), - dtype=itype, **opts ) - group.require_dataset( "col", shape=(frame.nnz,), - dtype=itype, **opts ) - group['row'][:] = frame.row - group['col'][:] = frame.col + opts = { + "compression": "lzf", + "shuffle": True, + } + # opts = {} + group.require_dataset("row", shape=(frame.nnz,), dtype=itype, **opts) + group.require_dataset("col", shape=(frame.nnz,), dtype=itype, **opts) + group["row"][:] = frame.row + group["col"][:] = frame.col for pxname, px in frame.pixels.items(): - group.require_dataset( pxname, shape=(frame.nnz,), - dtype=px.dtype, - **opts ) + group.require_dataset(pxname, shape=(frame.nnz,), dtype=px.dtype, **opts) group[pxname][:] = px if pxname in frame.meta: - group[pxname].attrs = dict( frame.meta[pxname] ) - - -omeganames = ['measurement/rot_center', 'measurement/rot', - 'measurement/diffrz_center', 'measurement/diffrz'] -dtynames = ['measurement/dty_center', 'measurement/dty', - 'measurement/diffty_center', 'measurement/diffty'] - - -class SparseScan( object ): - - - def __init__( self, hname, scan, start = 0, n=None, - names = ('row','col','intensity'), - omeganames = omeganames, - dtynames = dtynames ): + group[pxname].attrs = dict(frame.meta[pxname]) + + +omeganames = [ + "measurement/rot_center", + "measurement/rot", + "measurement/diffrz_center", + "measurement/diffrz", +] +dtynames = [ + "measurement/dty_center", + "measurement/dty", + "measurement/diffty_center", + "measurement/diffty", +] + + +class SparseScan(object): + def __init__( + self, + hname, + scan, + start=0, + n=None, + names=("row", "col", "intensity"), + omeganames=omeganames, + dtynames=dtynames, + ): """ hname : file coming from a sparse segmentation scan : a scan within that file @@ -228,55 +239,62 @@ def __init__( self, hname, scan, start = 0, n=None, self.names = list(names) self.omeganames = list(omeganames) self.dtynames = list(dtynames) - if 
scan.find('::') >= 0: # Format is "1.1::[start:end]" + if scan.find("::") >= 0: # Format is "1.1::[start:end]" scan, indexes = scan.split("::") - start, end = [int(s) for s in indexes[1:-1].split(':')] - n = end - start - with h5py.File(hname,"r") as hin: + start, end = [int(s) for s in indexes[1:-1].split(":")] + n = end - start + with h5py.File(hname, "r") as hin: grp = hin[scan] - self.shape = tuple( [ int(v) for v in ( grp.attrs['nframes'], - grp.attrs['shape0'], - grp.attrs['shape1'] ) ] ) + self.shape = tuple( + [ + int(v) + for v in ( + grp.attrs["nframes"], + grp.attrs["shape0"], + grp.attrs["shape1"], + ) + ] + ) if n is None: - end = self.shape[0] # nframes + end = self.shape[0] # nframes else: end = start + n - self.shape = end-start, self.shape[1], self.shape[2] + self.shape = end - start, self.shape[1], self.shape[2] # read the motors - if any self.motors = {} - for name, motors in [ ('omega',self.omeganames), - ('dty',self.dtynames) ]: + for name, motors in [("omega", self.omeganames), ("dty", self.dtynames)]: for motor in motors: if motor in grp: - self.motors[ name ] = grp[motor][start:end] + self.motors[name] = grp[motor][start:end] break # read the pixels - all pointers - nnz = grp['nnz'][:] - ipt = np.concatenate( ( (0,) , np.cumsum(nnz, dtype=int) ) ) + nnz = grp["nnz"][:] + ipt = np.concatenate(((0,), np.cumsum(nnz, dtype=int))) s = ipt[start] e = ipt[end] for name in self.names: if name in grp: - setattr( self, name, grp[name][s:e] ) + setattr(self, name, grp[name][s:e]) # pointers into this scan self.nnz = nnz[start:end] - self.ipt = np.concatenate( ( (0,) , np.cumsum(self.nnz, dtype=int) ) ) + self.ipt = np.concatenate(((0,), np.cumsum(self.nnz, dtype=int))) def getframe(self, i, SAFE=SAFE): # (self, row, col, shape, itype=np.uint16, pixels=None): s = self.ipt[i] - e = self.ipt[i+1] + e = self.ipt[i + 1] if s == e: - return None # empty frame - return sparse_frame( self.row[ s: e], - self.col[ s: e], - self.shape[1:], - pixels = { name : getattr( self, name)[s:e] for name in self.names }, - SAFE=SAFE ) - - - def cplabel(self, threshold = 0, countall=True ): - """ Label pixels using the connectedpixels assigment code + return None # empty frame + return sparse_frame( + self.row[s:e], + self.col[s:e], + self.shape[1:], + pixels={name: getattr(self, name)[s:e] for name in self.names}, + SAFE=SAFE, + ) + + def cplabel(self, threshold=0, countall=True): + """Label pixels using the connectedpixels assigment code Fills in: self.nlabels = number of peaks per frame self.labels = peak labels (should be unique) @@ -285,34 +303,35 @@ def cplabel(self, threshold = 0, countall=True ): if countall == True : labels all peaks from zero == False : labels from 1 on each frame """ - self.nlabels = np.zeros( len(self.nnz), np.int32 ) - self.labels = np.zeros( len(self.row), "i") - if 'labels' not in self.names: - self.names.append('labels') + self.nlabels = np.zeros(len(self.nnz), np.int32) + self.labels = np.zeros(len(self.row), "i") + if "labels" not in self.names: + self.names.append("labels") nl = 0 # TODO: run this in parallel with threads? - for i, npx in enumerate( self.nnz ): + for i, npx in enumerate(self.nnz): s = self.ipt[i] - e = self.ipt[i+1] + e = self.ipt[i + 1] if npx > 0: self.nlabels[i] = cImageD11.sparse_connectedpixels( - self.intensity[ s : e ], - self.row[ s : e ], - self.col[ s : e ], + self.intensity[s:e], + self.row[s:e], + self.col[s:e], threshold, - self.labels[ s : e ] ) + self.labels[s:e], + ) # zero label is the background! 
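The s:e slices in this loop come from self.ipt, a CSR-style frame pointer built in __init__: frame i owns the pixel slice ipt[i]:ipt[i+1]. A minimal illustration with made-up numbers:

import numpy as np

nnz = np.array([3, 0, 2])                   # pixels per frame
ipt = np.concatenate(((0,), np.cumsum(nnz, dtype=int)))  # [0, 3, 3, 5]
row = np.array([0, 1, 1, 4, 5], np.uint16)  # all pixel rows, concatenated
for i in range(len(nnz)):
    print("frame", i, "rows", row[ipt[i]:ipt[i + 1]])  # empty frames give empty slices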
- self.labels[ s : e ] = np.where( self.labels[ s : e ] > 0, - self.labels[ s : e ] + nl, 0 ) + self.labels[s:e] = np.where( + self.labels[s:e] > 0, self.labels[s:e] + nl, 0 + ) else: self.nlabels[i] = 0 if countall: nl += self.nlabels[i] self.total_labels = self.nlabels.sum() - - def lmlabel(self, threshold = 0, countall=True, smooth=True ): - """ Label pixels using the localmax assigment code + def lmlabel(self, threshold=0, countall=True, smooth=True): + """Label pixels using the localmax assigment code Fills in: self.nlabels = number of peaks per frame self.labels = peak labels (should be unique) @@ -320,38 +339,41 @@ def lmlabel(self, threshold = 0, countall=True, smooth=True ): if countall == True : labels all peaks from zero == False : labels from 1 on each frame """ - self.nlabels = np.zeros( len(self.nnz), np.int32 ) - self.labels = np.zeros( len(self.row), "i") - if 'labels' not in self.names: - self.names.append('labels') + self.nlabels = np.zeros(len(self.nnz), np.int32) + self.labels = np.zeros(len(self.row), "i") + if "labels" not in self.names: + self.names.append("labels") if smooth: - self.signal = np.empty( self.intensity.shape, np.float32 ) + self.signal = np.empty(self.intensity.shape, np.float32) else: self.signal = self.intensity # temporary workspaces npxmax = self.nnz.max() - vmx = np.zeros( npxmax, np.float32 ) - imx = np.zeros( npxmax, 'i' ) + vmx = np.zeros(npxmax, np.float32) + imx = np.zeros(npxmax, "i") nl = 0 # TODO: run this in parallel with threads? - for i, npx in enumerate( self.nnz ): + for i, npx in enumerate(self.nnz): s = self.ipt[i] - e = self.ipt[i+1] + e = self.ipt[i + 1] if npx > 0: if smooth: - cImageD11.sparse_smooth( self.intensity[ s: e], - self.row[s:e], - self.col[s:e], - self.signal[s:e] ) + cImageD11.sparse_smooth( + self.intensity[s:e], + self.row[s:e], + self.col[s:e], + self.signal[s:e], + ) self.nlabels[i] = cImageD11.sparse_localmaxlabel( - self.signal[ s : e ], - self.row[ s : e ], - self.col[ s : e ], + self.signal[s:e], + self.row[s:e], + self.col[s:e], vmx[:npx], imx[:npx], - self.labels[s : e] ) + self.labels[s:e], + ) assert (self.labels[s:e] > 0).all() - self.labels[ s : e ] += nl + self.labels[s:e] += nl else: self.nlabels[i] = 0 if countall: @@ -359,106 +381,106 @@ def lmlabel(self, threshold = 0, countall=True, smooth=True ): self.total_labels = self.nlabels.sum() def moments(self): - """ Computes the center of mass in s/f/omega - """ + """Computes the center of mass in s/f/omega""" pks = {} i32 = self.intensity.astype(np.float32) - pks['Number_of_pixels'] = np.bincount(self.labels, - weights=None, - minlength = self.total_labels+1 )[1:] - pks['sum_intensity'] = np.bincount(self.labels, - weights=i32, - minlength = self.total_labels+1 )[1:] - pks['s_raw'] = np.bincount(self.labels, - weights=i32*self.row, - minlength = self.total_labels+1 )[1:] - pks['s_raw'] /= pks['sum_intensity'] - pks['f_raw'] = np.bincount(self.labels, - weights=i32*self.col, - minlength = self.total_labels+1 )[1:] - pks['f_raw'] /= pks['sum_intensity'] - frame = np.empty( self.row.shape, np.int32 ) + pks["Number_of_pixels"] = np.bincount( + self.labels, weights=None, minlength=self.total_labels + 1 + )[1:] + pks["sum_intensity"] = np.bincount( + self.labels, weights=i32, minlength=self.total_labels + 1 + )[1:] + pks["s_raw"] = np.bincount( + self.labels, weights=i32 * self.row, minlength=self.total_labels + 1 + )[1:] + pks["s_raw"] /= pks["sum_intensity"] + pks["f_raw"] = np.bincount( + self.labels, weights=i32 * self.col, minlength=self.total_labels + 
1 + )[1:] + pks["f_raw"] /= pks["sum_intensity"] + frame = np.empty(self.row.shape, np.int32) for i in range(len(self.nnz)): - frame[ self.ipt[i]:self.ipt[i+1] ] = i - for name in 'omega','dty': + frame[self.ipt[i] : self.ipt[i + 1]] = i + for name in "omega", "dty": if name in self.motors: - pks[name] = np.bincount(self.labels, - weights=i32*self.motors[name][frame], - minlength = self.total_labels+1 )[1:] - pks[name] /= pks['sum_intensity'] + pks[name] = np.bincount( + self.labels, + weights=i32 * self.motors[name][frame], + minlength=self.total_labels + 1, + )[1:] + pks[name] /= pks["sum_intensity"] return pks -def from_data_mask( mask, data, header ): +def from_data_mask(mask, data, header): """ Create a sparse from a dense array """ assert mask.shape == data.shape # using uint16 here - perhaps make this general in the future # ... but not for now - assert data.shape[0] < pow(2,16)-1 - assert data.shape[1] < pow(2,16)-1 - nnz = (mask>0).sum() - tmp = np.empty( data.shape[0],'i') # tmp hold px per row cumsums - row = np.empty( nnz, np.uint16 ) - col = np.empty( nnz, np.uint16 ) - cImageD11.mask_to_coo( mask, row, col, tmp ) - intensity = data[ mask > 0 ] + assert data.shape[0] < pow(2, 16) - 1 + assert data.shape[1] < pow(2, 16) - 1 + nnz = (mask > 0).sum() + tmp = np.empty(data.shape[0], "i") # tmp hold px per row cumsums + row = np.empty(nnz, np.uint16) + col = np.empty(nnz, np.uint16) + cImageD11.mask_to_coo(mask, row, col, tmp) + intensity = data[mask > 0] # intensity.attrs = dict(header) # FIXME USE xarray ? - spf = sparse_frame( row, col, data.shape, itype=np.uint16 ) - spf.set_pixels( "intensity" , intensity, dict( header ) ) + spf = sparse_frame(row, col, data.shape, itype=np.uint16) + spf.set_pixels("intensity", intensity, dict(header)) return spf -def from_data_cut( data, cut, header={}, detectormask=None): +def from_data_cut(data, cut, header={}, detectormask=None): assert data.dtype in (np.uint16, np.float32) if detectormask is None: - msk = np.ones(data.shape, bool ) + msk = np.ones(data.shape, bool) else: msk = detectormask - row = np.empty( data.shape, np.uint16 ) - col = np.empty( data.shape, np.uint16 ) + row = np.empty(data.shape, np.uint16) + col = np.empty(data.shape, np.uint16) if data.dtype == np.uint16: - val = np.empty( data.shape, np.uint16 ) - nnz = cImageD11.tosparse_u16( data, msk, row, col, val, cut) + val = np.empty(data.shape, np.uint16) + nnz = cImageD11.tosparse_u16(data, msk, row, col, val, cut) if data.dtype == np.float32: - val = np.empty( data.shape, np.float32 ) - nnz = cImageD11.tosparse_f32( data, msk, row, col, val, cut) - spf = sparse_frame( row.ravel()[:nnz].copy(), - col.ravel()[:nnz].copy(), - data.shape ) - spf.set_pixels( 'intensity', val.ravel()[:nnz].copy(), dict(header) ) + val = np.empty(data.shape, np.float32) + nnz = cImageD11.tosparse_f32(data, msk, row, col, val, cut) + spf = sparse_frame(row.ravel()[:nnz].copy(), col.ravel()[:nnz].copy(), data.shape) + spf.set_pixels("intensity", val.ravel()[:nnz].copy(), dict(header)) return spf - -def from_hdf_group( group ): - itype = np.dtype( group.attrs['itype'] ) - shape = group.attrs['shape0'], group.attrs['shape1'] - row = group['row'][:] # read it - col = group['col'][:] - spf = sparse_frame( row, col, shape, itype=itype ) +def from_hdf_group(group): + itype = np.dtype(group.attrs["itype"]) + shape = group.attrs["shape0"], group.attrs["shape1"] + row = group["row"][:] # read it + col = group["col"][:] + spf = sparse_frame(row, col, shape, itype=itype) for pxname in list(group): if pxname 
in ["row", "col"]: continue data = group[pxname][:] - header = dict( group[pxname].attrs ) - spf.set_pixels( pxname, data, header ) + header = dict(group[pxname].attrs) + spf.set_pixels(pxname, data, header) return spf -def sparse_moments( frame, intensity_name, labels_name ): - """ We rely on a labelling array carrying nlabel metadata (==labels.data.max())""" - nl = frame.meta[ labels_name ][ "nlabel" ] + +def sparse_moments(frame, intensity_name, labels_name): + """We rely on a labelling array carrying nlabel metadata (==labels.data.max())""" + nl = frame.meta[labels_name]["nlabel"] return cImageD11.sparse_blob2Dproperties( - frame.pixels[intensity_name].astype(np.float32), # limitations of f2py here. + frame.pixels[intensity_name].astype(np.float32), # limitations of f2py here. frame.row, frame.col, frame.pixels[labels_name], - nl ) + nl, + ) class overlaps_linear: - """ Memory caching object for the linear time algorithm to find + """Memory caching object for the linear time algorithm to find peak overlaps Given (row1, col1, label1) and (row2, col2, label2) it finds pixels @@ -466,46 +488,49 @@ class overlaps_linear: and returns (labels1[i], labels2[i], sum_pixels[i]) ... so the number of overlapping pixels for that pair of labels """ - def __init__(self, nnzmax=4096*4): - """ nnzmax = max pixels on a frame """ + + def __init__(self, nnzmax=4096 * 4): + """nnzmax = max pixels on a frame""" self.nnzmax = nnzmax self.realloc() def realloc(self): nnzmax = self.nnzmax - self.ki = np.empty( nnzmax,'i' ) - self.kj = np.empty( nnzmax,'i' ) - self.ect = np.empty( nnzmax, 'i' ) - self.tj = np.empty( nnzmax, 'i' ) - self.tmp = np.empty( nnzmax+1,'i') - - def __call__(self, row1, col1, labels1, n1, - row2, col2, labels2, n2, checkmem=True ): + self.ki = np.empty(nnzmax, "i") + self.kj = np.empty(nnzmax, "i") + self.ect = np.empty(nnzmax, "i") + self.tj = np.empty(nnzmax, "i") + self.tmp = np.empty(nnzmax + 1, "i") + + def __call__(self, row1, col1, labels1, n1, row2, col2, labels2, n2, checkmem=True): if checkmem: - assert len(row1)==len(col1)==len(labels1) - assert len(row2)==len(col2)==len(labels2) - nnz = max( max(len(row1), len(row2)), max(n1,n2)) + assert len(row1) == len(col1) == len(labels1) + assert len(row2) == len(col2) == len(labels2) + nnz = max(max(len(row1), len(row2)), max(n1, n2)) if nnz > self.nnzmax: self.nnzmax = nnz - print("realloc",nnz) + print("realloc", nnz) self.realloc() - npx = cImageD11.sparse_overlaps( row1, col1, self.ki[:len(row1)], - row2, col2, self.kj[:len(row2)] ) - if npx == 0: # there are no overlaps + npx = cImageD11.sparse_overlaps( + row1, col1, self.ki[: len(row1)], row2, col2, self.kj[: len(row2)] + ) + if npx == 0: # there are no overlaps return 0, None - r = labels1[ self.ki[:npx] ] # my labels - c = labels2[ self.kj[:npx] ] # your labels - nedge = cImageD11.compress_duplicates( r, c, self.ect[:npx], self.tj[:npx], self.tmp ) + r = labels1[self.ki[:npx]] # my labels + c = labels2[self.kj[:npx]] # your labels + nedge = cImageD11.compress_duplicates( + r, c, self.ect[:npx], self.tj[:npx], self.tmp + ) # overwrites r/c in place : ignore the zero label (hope it is not there) - rcl = np.zeros( (nedge, 3), 'i') - rcl[:,0] = r[:nedge] - rcl[:,1] = c[:nedge] - rcl[:,2] = self.ect[:nedge] + rcl = np.zeros((nedge, 3), "i") + rcl[:, 0] = r[:nedge] + rcl[:, 1] = c[:nedge] + rcl[:, 2] = self.ect[:nedge] return nedge, rcl class overlaps_matrix: - """ Memory caching object for the quadratic time algorithm to find + """Memory caching object for the quadratic time 
algorithm to find peak overlaps Given (row1, col1, label1) and (row2, col2, label2) it finds pixels @@ -515,33 +540,30 @@ class overlaps_matrix: This is easier to understand and faster for small number of peaks per frame """ + def __init__(self, npkmax=256): self.npkmax = npkmax self.realloc() def realloc(self): - self.matmem = np.empty( (self.npkmax* self.npkmax,), 'i') + self.matmem = np.empty((self.npkmax * self.npkmax,), "i") # potentially n^2 overlaps. Really? - self.results = np.empty( (3*self.npkmax*self.npkmax), 'i') - - def __call__(self, row1, col1, labels1, n1, - row2, col2, labels2, n2, checkmem=True ): - assert labels1.max()-1 < n1, "%d %d %d"%(labels1.min(), - labels1.max(), - n1) - assert labels2.max()-1 < n2 + self.results = np.empty((3 * self.npkmax * self.npkmax), "i") + + def __call__(self, row1, col1, labels1, n1, row2, col2, labels2, n2, checkmem=True): + assert labels1.max() - 1 < n1, "%d %d %d" % (labels1.min(), labels1.max(), n1) + assert labels2.max() - 1 < n2 mx = max(n1, n2) if max(n1, n2) > self.npkmax: self.npkmax = mx - print("realloc",mx) + print("realloc", mx) self.realloc() - mat = self.matmem[:n1*n2] + mat = self.matmem[: n1 * n2] mat.shape = n1, n2 - nov = cImageD11.coverlaps( row1, col1, labels1, - row2, col2, labels2, - mat, self.results ) - return nov, self.results[:nov*3].reshape((nov,3)) - + nov = cImageD11.coverlaps( + row1, col1, labels1, row2, col2, labels2, mat, self.results + ) + return nov, self.results[: nov * 3].reshape((nov, 3)) def overlaps(frame1, labels1, frame2, labels2): @@ -553,73 +575,60 @@ def overlaps(frame1, labels1, frame2, labels2): label in other (col) number of shared pixels (data) """ - ki = np.empty( frame1.nnz, 'i' ) - kj = np.empty( frame2.nnz, 'i' ) - npx = cImageD11.sparse_overlaps( frame1.row, frame1.col, ki, - frame2.row, frame2.col, kj) + ki = np.empty(frame1.nnz, "i") + kj = np.empty(frame2.nnz, "i") + npx = cImageD11.sparse_overlaps( + frame1.row, frame1.col, ki, frame2.row, frame2.col, kj + ) # self.data and other.data filled during init - row = frame1.pixels[labels1][ ki[:npx] ] # my labels - col = frame2.pixels[labels2][ kj[:npx] ] # your labels - ect = np.empty( npx, 'i') # ect = counts of overlaps - tj = np.empty( npx, 'i') # tj = temporary for sorting - n1 = frame1.meta[labels1][ "nlabel" ] - n2 = frame2.meta[labels2][ "nlabel" ] - tmp = np.empty( max(n1, n2)+1, 'i') # for histogram - nedge = cImageD11.compress_duplicates( row, col, ect, tj, tmp ) + row = frame1.pixels[labels1][ki[:npx]] # my labels + col = frame2.pixels[labels2][kj[:npx]] # your labels + ect = np.empty(npx, "i") # ect = counts of overlaps + tj = np.empty(npx, "i") # tj = temporary for sorting + n1 = frame1.meta[labels1]["nlabel"] + n2 = frame2.meta[labels2]["nlabel"] + tmp = np.empty(max(n1, n2) + 1, "i") # for histogram + nedge = cImageD11.compress_duplicates(row, col, ect, tj, tmp) # overwrites row/col in place : ignore the zero label (hope it is not there) - crow = row[:nedge]-1 - ccol = col[:nedge]-1 + crow = row[:nedge] - 1 + ccol = col[:nedge] - 1 cdata = ect[:nedge] - cedges = scipy.sparse.coo_matrix( ( cdata, (crow, ccol)), shape=(n1, n2) ) + cedges = scipy.sparse.coo_matrix((cdata, (crow, ccol)), shape=(n1, n2)) # really? 
return cedges -def sparse_connected_pixels( frame, - label_name="connectedpixels", - data_name="intensity", - threshold=None ): +def sparse_connected_pixels( + frame, label_name="connectedpixels", data_name="intensity", threshold=None +): """ frame = a sparse frame label_name = the array to save labels to in that frame data_name = an array in that frame threshold = float value or take data.threshold """ - labels = np.zeros( frame.nnz, "i" ) + labels = np.zeros(frame.nnz, "i") if threshold is None: threshold = frame.meta[data_name]["threshold"] nlabel = cImageD11.sparse_connectedpixels( - frame.pixels[data_name], frame.row, frame.col, - threshold, labels ) - frame.set_pixels( label_name, labels, { 'nlabel' : nlabel } ) + frame.pixels[data_name], frame.row, frame.col, threshold, labels + ) + frame.set_pixels(label_name, labels, {"nlabel": nlabel}) return nlabel -def sparse_localmax( frame, - label_name="localmax", - data_name = "intensity" ): - labels = np.zeros( frame.nnz, "i" ) - vmx = np.zeros( frame.nnz, np.float32 ) - imx = np.zeros( frame.nnz, 'i') +def sparse_localmax(frame, label_name="localmax", data_name="intensity"): + labels = np.zeros(frame.nnz, "i") + vmx = np.zeros(frame.nnz, np.float32) + imx = np.zeros(frame.nnz, "i") nlabel = cImageD11.sparse_localmaxlabel( - frame.pixels[data_name], frame.row, frame.col, - vmx, imx, labels ) - frame.set_pixels( label_name, labels, { "nlabel" : nlabel } ) + frame.pixels[data_name], frame.row, frame.col, vmx, imx, labels + ) + frame.set_pixels(label_name, labels, {"nlabel": nlabel}) return nlabel -def sparse_smooth( frame, data_name='intensity' ): - smoothed = np.zeros( frame.nnz, np.float32 ) - cImageD11.sparse_smooth( frame.pixels[data_name], - frame.row, - frame.col, - smoothed ) +def sparse_smooth(frame, data_name="intensity"): + smoothed = np.zeros(frame.nnz, np.float32) + cImageD11.sparse_smooth(frame.pixels[data_name], frame.row, frame.col, smoothed) return smoothed - - - - - - - - diff --git a/ImageD11/sym_u.py b/ImageD11/sym_u.py index cdb22444..9d6c0efc 100644 --- a/ImageD11/sym_u.py +++ b/ImageD11/sym_u.py @@ -1,34 +1,33 @@ - from __future__ import print_function ## Automatically adapted for numpy.oldnumeric Sep 06, 2007 by alter_code1.py - - import numpy as np -import logging DEBUG = False + def m_from_string(s): """ Creates a symmetry operator from a string """ m = [] - t = np.array(eval("lambda x,y,z: ( %s )"%(s))(0,0,0)) - for v1,v2,v3 in [ [ 1,0,0] , [ 0,1,0], [0,0,1] ]: - r = eval("lambda x,y,z: ( %s )"%(s))(v1,v2,v3) - m.append(np.array(r)-t) + t = np.array(eval("lambda x,y,z: ( %s )" % (s))(0, 0, 0)) + for v1, v2, v3 in [[1, 0, 0], [0, 1, 0], [0, 0, 1]]: + r = eval("lambda x,y,z: ( %s )" % (s))(v1, v2, v3) + m.append(np.array(r) - t) return np.array(m) + def fmt(c): if c == 1: return "+" if c == -1: return "-" else: - return "%f"%(c) + return "%f" % (c) + def m_to_string(m): """ @@ -36,30 +35,26 @@ def m_to_string(m): """ st = [] for i in range(3): - for v,s in zip([ [ 1,0,0] , [ 0,1,0], [0,0,1] ], - "xyz"): - c = np.dot(v,m.T[i]) - if abs(c)>0: - st.append( "%s%s"%(fmt(c),s)) - if i<2: + for v, s in zip([[1, 0, 0], [0, 1, 0], [0, 0, 1]], "xyz"): + c = np.dot(v, m.T[i]) + if abs(c) > 0: + st.append("%s%s" % (fmt(c), s)) + if i < 2: st.append(",") return "".join(st) - - - - - class group: - """ An abstract mathematical finite(?) point rotation groups """ + """An abstract mathematical finite(?) 
point rotation groups""" + def __init__(self, tol=1e-5): """ Basic group is identity tol is for numerical comparison of group membership """ - self.group = [ np.identity(3, float) ] + self.group = [np.identity(3, float)] self.tol = 1e-5 + def op(self, x, y): """ Normally multiplication ? @@ -71,11 +66,13 @@ def op(self, x, y): # Only appears to make sense for pure rotation matrices # assert abs(d-1)<1e-6, (str((d,m,x,y))) return m + def comp(self, x, y): """ Compare two things for equality """ - return np.allclose(x, y, rtol = self.tol, atol=self.tol) + return np.allclose(x, y, rtol=self.tol, atol=self.tol) + def isMember(self, item): """ Decide if item is already in the group @@ -84,6 +81,7 @@ def isMember(self, item): if self.comp(g, item): return True return False + def additem(self, item): """ add a new member @@ -91,9 +89,10 @@ def additem(self, item): item = np.asarray(item) if not self.isMember(item): self.group.append(item) - #else: + # else: # logging.warning(str(item)+" is already a group member") self.makegroup() + def makegroup(self): """ ensure all items = op(x,y) are in group @@ -105,24 +104,27 @@ def makegroup(self): while new: for a in self.group: for b in self.group: - c = self.op(a,b) + c = self.op(a, b) new = True if self.isMember(c): - new=False + new = False if new: - if DEBUG: print("adding",c,"to group") + if DEBUG: + print("adding", c, "to group") self.group.append(c) + symcache = {} + def generate_group(*args): global symcache if args in symcache: return symcache[args] - g=group() + g = group() for a in args: g.additem(m_from_string(a)) - symcache[args]=g + symcache[args] = g return g @@ -180,48 +182,60 @@ def generate_group(*args): proper_point_groups = """1 2 3 4 6 222 322 422 622 23 432""".split() assert len(proper_point_groups) == 11 -improper_centrosymmetrical_point_groups = """-1 2/m -3 4/m 6/m mmm -3m 4/mmm 6/mmm m-3 m3m""".split() +improper_centrosymmetrical_point_groups = ( + """-1 2/m -3 4/m 6/m mmm -3m 4/mmm 6/mmm m-3 m3m""".split() +) assert len(improper_centrosymmetrical_point_groups) == 11 improper_acentric_point_groups = """m -4 -6 mm2 3mm 4mm -42m 6mm -62m -43m""".split() assert len(improper_acentric_point_groups) == 10 + def cubic(): - """ P32 """ - return generate_group( "z,x,y", "-y,x,z" ) + """P32""" + return generate_group("z,x,y", "-y,x,z") + def hexagonal(): - #""" P6 168 """ - #return generate_group ( "-y,x-y,z", "-x,-y,z" ) - """ P6/mmm 191""" - return generate_group ( "-y,x-y,z", "-x,-y,z", "y,x,-z" ) + # """ P6 168 """ + # return generate_group ( "-y,x-y,z", "-x,-y,z" ) + """P6/mmm 191""" + return generate_group("-y,x-y,z", "-x,-y,z", "y,x,-z") + def trigonal(): - """ P321 150 """ - return generate_group ( "y,-x-y,z", "y,x,-z" ) + """P321 150""" + return generate_group("y,-x-y,z", "y,x,-z") + def rhombohedralP(): - """ R3 primitive """ + """R3 primitive""" return generate_group("z,x,y", "-z,-y,-x") + def tetragonal(): - """ P4 75""" - return generate_group ( "-y,x,z", "-x,y,-z" ) + """P4 75""" + return generate_group("-y,x,z", "-x,y,-z") + def orthorhombic(): - """ P222 16 """ - return generate_group( "-x,-y,z", "-x,y,-z" ) + """P222 16""" + return generate_group("-x,-y,z", "-x,y,-z") + def monoclinic_c(): - return generate_group("-x,-y,z" ) + return generate_group("-x,-y,z") + def monoclinic_a(): - return generate_group("x,-y,-z" ) + return generate_group("x,-y,-z") + def monoclinic_b(): - return generate_group("-x,y,-z" ) + return generate_group("-x,y,-z") + def triclinic(): - return generate_group("x, y, z" ) + return 
generate_group("x, y, z") def find_uniq_u(u, grp, debug=0, func=np.trace): @@ -230,7 +244,8 @@ def find_uniq_u(u, grp, debug=0, func=np.trace): for o in grp.group: cand = grp.op(o, u) t = func(cand) - if debug: print(t) + if debug: + print(t) if func(cand) > tmax: uniq = cand tmax = t @@ -239,19 +254,20 @@ def find_uniq_u(u, grp, debug=0, func=np.trace): def hklmax(h, hmax=1000): # Assumes |h| < hmax - return (h[0]*hmax + h[1])*hmax + h[2] + return (h[0] * hmax + h[1]) * hmax + h[2] + -def find_uniq_hkls( hkls, grp, func=hklmax): - assert hkls.shape[0] == 3, 'hkls must be 3xn array' +def find_uniq_hkls(hkls, grp, func=hklmax): + assert hkls.shape[0] == 3, "hkls must be 3xn array" uniq = hkls.copy() - tmax = func( hkls ) + tmax = func(hkls) for o in grp.group: - cand = grp.op( o, hkls) + cand = grp.op(o, hkls) t = func(cand) msk = t > tmax for i in range(3): - uniq[i] = np.where( msk , cand[i], uniq[i] ) - tmax = np.where( msk , t, tmax ) + uniq[i] = np.where(msk, cand[i], uniq[i]) + tmax = np.where(msk, t, tmax) return uniq @@ -261,33 +277,37 @@ class trans_group(group): FIXME - this is mostly done in lattice_reduction.py instead now """ - def __init__(self, tol = 1e-5): + + def __init__(self, tol=1e-5): """ Identity is to not move at all """ - group.__init__(self,tol) - + group.__init__(self, tol) + def op(self, x, y): """ Means of generating new thing from two others In this case add them and mod by group members """ return self.reduce(x + y) + def reduce(self, v): """ Perform lattice reduction """ - vc = np.array(v).copy() # copies + vc = np.array(v).copy() # copies for o in self.group: vc = self.mod(vc, o) # if DEBUG: print "reduced",v,vc return vc + def additem(self, x): - """ Do lattice reduction before adding as infinite group""" - t = self.reduce(x) + """Do lattice reduction before adding as infinite group""" + _ = self.reduce(x) group.additem(self, self.reduce(x)) # Now try to remove anything which is spare?? return + def mod(self, x, y): """ Remove y from x to give smallest possible result @@ -295,63 +315,68 @@ def mod(self, x, y): """ ly2 = np.dot(y, y) if ly2 > 1e-9: - ny = np.dot(x,y)/ly2 - parl = ny * y + ny = np.dot(x, y) / ly2 + # parl = ny * y ints = np.round_(ny) return x - ints * y else: return x + def isMember(self, x): return group.isMember(self, self.reduce(x)) + def test(): - assert np.allclose( m_from_string( "x,y,z" ), np.identity(3)) - assert np.allclose( m_from_string( "-y,x,z" ), np.array([ [ 0,1,0], - [-1,0,0], - [ 0,0,1]] )) - assert np.allclose( m_from_string( "-y,y-x,z" ), np.array([[ 0,-1,0], - [ -1, 1,0], - [ 0, 0,1]] )) + assert np.allclose(m_from_string("x,y,z"), np.identity(3)) + assert np.allclose( + m_from_string("-y,x,z"), np.array([[0, 1, 0], [-1, 0, 0], [0, 0, 1]]) + ) + assert np.allclose( + m_from_string("-y,y-x,z"), np.array([[0, -1, 0], [-1, 1, 0], [0, 0, 1]]) + ) print("testing1") - for op in [ "x,y,z", "-y,x-y,z", "-y,x,z"]: + for op in ["x,y,z", "-y,x-y,z", "-y,x,z"]: d = np.linalg.det(m_from_string(op)) - assert d == 1.0, "Determinant = %f %s"%(d,op) + assert d == 1.0, "Determinant = %f %s" % (d, op) print("testing2") assert len(cubic().group) == 24, "not 24 ops found for cubic !" - assert len(hexagonal().group) == 12 ,"not 12 ops found for hexagonal !" - assert len(trigonal().group) == 6 ,"not 7 ops found for trigonal !"+\ - str(trigonal().group) - assert len(tetragonal().group) == 8 ,"not 8 ops found for tetragonal !" - assert len(orthorhombic().group) == 4 ,"not 4 ops found for orthorhombic !" 
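As a usage sketch for the helpers above, mirroring what test() verifies just below: cubic() generates the 24 proper rotations, and find_uniq_u() returns the symmetry-equivalent orientation that maximises the trace, so a pure 90-degree rotation collapses to the identity.

import numpy as np
from ImageD11.sym_u import cubic, find_uniq_u

g = cubic()
print(len(g.group))                                      # 24
u = np.array([[0, 1, 0], [-1, 0, 0], [0, 0, 1]], float)  # 90-degree rotation about z
print(find_uniq_u(u, g))                                 # ~ np.identity(3)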
+ assert len(hexagonal().group) == 12, "not 12 ops found for hexagonal !" + assert len(trigonal().group) == 6, "not 7 ops found for trigonal !" + str( + trigonal().group + ) + assert len(tetragonal().group) == 8, "not 8 ops found for tetragonal !" + assert len(orthorhombic().group) == 4, "not 4 ops found for orthorhombic !" print("testing3") - for f in [ monoclinic_a, monoclinic_b, monoclinic_c]: + for f in [monoclinic_a, monoclinic_b, monoclinic_c]: r = f().group assert len(r) == 2, " not 2 ops for monoclinic " assert np.allclose( - find_uniq_u( np.array( - [[0,1,0],[-1,0,0],[0,0,1]]),cubic()), - np.identity(3) ), "Should easily get this unique choice" + find_uniq_u(np.array([[0, 1, 0], [-1, 0, 0], [0, 0, 1]]), cubic()), + np.identity(3), + ), "Should easily get this unique choice" # translational groups g1 = trans_group() g2 = trans_group() - ops = [ np.array( [ 1,0,0], float) , - np.array( [ 0,1,0], float) , - np.array( [ 0,0,1], float) ] + ops = [ + np.array([1, 0, 0], float), + np.array([0, 1, 0], float), + np.array([0, 0, 1], float), + ] for op in ops: g1.additem(op) g2.additem(op) - g2.additem( np.array( [ 5,6,7], float) ) + g2.additem(np.array([5, 6, 7], float)) for op2 in g2.group: found = False for op1 in g1.group: - if ( op1 == op2).all(): + if (op1 == op2).all(): found = True if not found: - raise Exception ("Bad translation groups") - assert not g2.isMember([0.1,0.5,10]), "Not a member" - assert g2.isMember([99,-1e5,4e7]), "Not a member" + raise Exception("Bad translation groups") + assert not g2.isMember([0.1, 0.5, 10]), "Not a member" + assert g2.isMember([99, -1e5, 4e7]), "Not a member" global DEBUG DEBUG = True g2.additem([0.1, 0.45, 10]) @@ -364,24 +389,32 @@ def getgroup(s): convert a user supplied string to a group ... a little vague still """ - if s in ['cubic', 'hexagonal','trigonal','tetragonal', - 'orthorhombic','monoclinic_c','monoclinic_a', - 'monoclinic_b','triclinic','rhombohedralP']: + if s in [ + "cubic", + "hexagonal", + "trigonal", + "tetragonal", + "orthorhombic", + "monoclinic_c", + "monoclinic_a", + "monoclinic_b", + "triclinic", + "rhombohedralP", + ]: import ImageD11.sym_u - return getattr(ImageD11.sym_u, s) + return getattr(ImageD11.sym_u, s) -if __name__=="__main__": +if __name__ == "__main__": test() + u = np.array( + [ + [0.71850787, 0.69517833, 0.02176059], + [-0.62925889, 0.66306714, -0.40543213], + [-0.29627636, 0.27761313, 0.91386611], + ] + ) - u = np.array([[ 0.71850787 , 0.69517833, 0.02176059], - [-0.62925889 , 0.66306714, -0.40543213], - [-0.29627636 , 0.27761313 , 0.91386611]]) - - find_uniq_u(u,cubic(),debug=0) - - - - + find_uniq_u(u, cubic(), debug=0) diff --git a/ImageD11/symops.py b/ImageD11/symops.py index 928639b3..210b68a6 100644 --- a/ImageD11/symops.py +++ b/ImageD11/symops.py @@ -1,172 +1,195 @@ - from __future__ import print_function - # Systematic absence checking # by Gavin Vaughan, April 2011 # Cell centering -def lattice_centre(h,k,l,ctype): - if ctype == "P": - return False - elif ctype == "A": - return (k+l)%2 != 0 - elif ctype == "B": - return (h+l)%2 != 0 - elif ctype == "C": - return (h+k)%2 != 0 - elif ctype == "I": - return (h+k+l)%2 != 0 - elif ctype == "F": - return (h+k)%2!=0 or (h+l)%2!=0 or (k+l)%2!=0 - elif ctype == "R": - return (-h+k+l)%3 != 0 - else: - return False + +def lattice_centre(h, k, l, ctype): + if ctype == "P": + return False + elif ctype == "A": + return (k + l) % 2 != 0 + elif ctype == "B": + return (h + l) % 2 != 0 + elif ctype == "C": + return (h + k) % 2 != 0 + elif ctype == "I": + return (h 
+ k + l) % 2 != 0 + elif ctype == "F": + return (h + k) % 2 != 0 or (h + l) % 2 != 0 or (k + l) % 2 != 0 + elif ctype == "R": + return (-h + k + l) % 3 != 0 + else: + return False + # rotations -def rotation_axis(h,k,l,rtype,axis): - return False + +def rotation_axis(h, k, l, rtype, axis): + return False + # mirrors -def mirror_plane(h,k,l,axis): - return False + +def mirror_plane(h, k, l, axis): + return False + # 1st type of screw axes -def screw_axis(h,k,l,stype,axis): - if stype== '21' or stype == '42' or stype == '63': - mod=2 - elif stype == '31' or stype == '32' or stype == '62' or stype == '64': - mod=3 - elif stype == '41' or stype == '43': - mod=4 - elif stype == '61' or stype == '65': - mod=6 - else: - raise Exception(stype+" is not a valid screw axis\n") - if axis == 1: - if k != 0 and l != 0: return False - return h%mod != 0 - if axis == 2: - if h != 0 and l != 0: return False - return k%mod != 0 - if axis == 3: - if h != 0 and k != 0: return False - return l%mod != 0 - return False - - -# Glide Planes - -def glide_plane(h,k,l,gtype,axis): - if axis == 1: - if h != 0: return False - elif axis == 2: - if k != 0: return False - elif axis == 3: - if l != 0: return False - return glidetest(gtype,axis) - - -def glidetest(gtype,axis): - if gtype == 'a': - return h%2 != 0 - if gtype == 'b': - return k%2 != 0 - if gtype == 'c': - return l%2 != 0 - if gtype == 'n' : - if axis == 1: return (k+l)%2 != 0 - if axis == 2: return (h+l)%2 != 0 - if axis == 3: return (h+k)%2 != 0 - if gtype == 'd': - if axis == 1: return (k+l)%4 != 0 - if axis == 2: return (h+l)%4 != 0 - if axis == 3: return (h+k)%4 != 0 - return False - + +def screw_axis(h, k, l, stype, axis): + if stype == "21" or stype == "42" or stype == "63": + mod = 2 + elif stype == "31" or stype == "32" or stype == "62" or stype == "64": + mod = 3 + elif stype == "41" or stype == "43": + mod = 4 + elif stype == "61" or stype == "65": + mod = 6 + else: + raise Exception(stype + " is not a valid screw axis\n") + if axis == 1: + if k != 0 and l != 0: + return False + return h % mod != 0 + if axis == 2: + if h != 0 and l != 0: + return False + return k % mod != 0 + if axis == 3: + if h != 0 and k != 0: + return False + return l % mod != 0 + return False + + +# Glide Planes + + +def glide_plane(h, k, l, gtype, axis): + if axis == 1: + if h != 0: + return False + elif axis == 2: + if k != 0: + return False + elif axis == 3: + if l != 0: + return False + return glidetest(gtype, axis) + + +def glidetest(gtype, axis): + if gtype == "a": + return h % 2 != 0 + if gtype == "b": + return k % 2 != 0 + if gtype == "c": + return l % 2 != 0 + if gtype == "n": + if axis == 1: + return (k + l) % 2 != 0 + if axis == 2: + return (h + l) % 2 != 0 + if axis == 3: + return (h + k) % 2 != 0 + if gtype == "d": + if axis == 1: + return (k + l) % 4 != 0 + if axis == 2: + return (h + l) % 4 != 0 + if axis == 3: + return (h + k) % 4 != 0 + return False + + def test_absence(h, k, l, sg): -# break down the SG name into its symmetry components -# e.g. P21c -> P 1 21/c 1 -# To be unambiguous, it would be best if they were entered in the explicit way -# P21c could mean e.g. 
P 2 1 c or P 1 21 c or P 21 c 1 etc etc -# So use an unambiguous string like "P 1 21/c 1" - symmop=sgstring.split() - if len(symmop) != 4: - print("Only read %d symmops"%symmop) - raise Exception("You must supply at least 4 space separated symmetry operations"\ - " and optional compound operations separated by '/'") - - is_absent=lattice_centre(h,k,l,symmop[0]) - if is_absent: return True - - for i in range(1,4): - if "/" in symmop[i]: - op1, op2 = symmop[i].split("/") - is_absent = checkop(h,k,l,op1,i) - if is_absent: return True - is_absent = checkop(h,k,l,op2,i) - if is_absent: return True - else: - is_absent = checkop(h,k,l,symmop[i],i) - if is_absent: return True - - return False - - -def checkop(h,k,l,op,axis): -# could do something clever, but this is just brute force - rots=['1', '-1', '2', '-2', '3', '-3', '4', '-4', '6', '-6'] - mirror=['m'] - screws=['21', '31', '41', '61', '32', '42', '62', '43', '63', '64', '65'] - glides=['a', 'b', 'c', 'n', 'd'] - - if op in rots: - return False - if op in mirror: - return False - if op in screws: - return screw_axis(h,k,l,op,axis) - if op in glides: - return glide_plane(h,k,l,op,axis) - raise Exception(op+" is not a valid symmetry operation") - - -#never mind inversion for the absence check - -if __name__ == "__main__": - import sys - if len(sys.argv) != 8: - print("Usage %s h k l sg_with_spaces"%(sys.argv[0])) - print(len(sys.argv)) - sys.exit() - try: - h=int(sys.argv[1]) - except: - print("Sorry %s is not an integer\n" % sys.argv[1]) - sys.exit() - try: - k=int(sys.argv[2]) - except: - print("Sorry %s is not an integer\n" % sys.argv[2]) - sys.exit() - try: - l=int(sys.argv[3]) - except: - print("Sorry %s is not an integer\n" % sys.argv[3]) - sys.exit() - - - sgstring=sys.argv[4]+" "+sys.argv[5]+" "+sys.argv[6]+" "+sys.argv[7] - absent=test_absence(h,k,l,sgstring) - if absent is True: - print("The ",h,k,l,"reflection is absent in space group "+sgstring) - else: - print("The ",h,k,l,"reflection is present in space group "+sgstring) - - + # break down the SG name into its symmetry components + # e.g. P21c -> P 1 21/c 1 + # To be unambiguous, it would be best if they were entered in the explicit way + # P21c could mean e.g. 
P 2 1 c or P 1 21 c or P 21 c 1 etc etc
+    # So use an unambiguous string like "P 1 21/c 1"
+    symmop = sg.split()
+    if len(symmop) != 4:
+        print("Only read %d symmops" % len(symmop))
+        raise Exception(
+            "You must supply at least 4 space separated symmetry operations"
+            " and optional compound operations separated by '/'"
+        )
+
+    is_absent = lattice_centre(h, k, l, symmop[0])
+    if is_absent:
+        return True
+
+    for i in range(1, 4):
+        if "/" in symmop[i]:
+            op1, op2 = symmop[i].split("/")
+            is_absent = checkop(h, k, l, op1, i)
+            if is_absent:
+                return True
+            is_absent = checkop(h, k, l, op2, i)
+            if is_absent:
+                return True
+        else:
+            is_absent = checkop(h, k, l, symmop[i], i)
+            if is_absent:
+                return True
+
+    return False
+
+
+def checkop(h, k, l, op, axis):
+    # could do something clever, but this is just brute force
+    rots = ["1", "-1", "2", "-2", "3", "-3", "4", "-4", "6", "-6"]
+    mirror = ["m"]
+    screws = ["21", "31", "41", "61", "32", "42", "62", "43", "63", "64", "65"]
+    glides = ["a", "b", "c", "n", "d"]
+
+    if op in rots:
+        return False
+    if op in mirror:
+        return False
+    if op in screws:
+        return screw_axis(h, k, l, op, axis)
+    if op in glides:
+        return glide_plane(h, k, l, op, axis)
+    raise Exception(op + " is not a valid symmetry operation")
+
+
+# never mind inversion for the absence check
+
+if __name__ == "__main__":
+    import sys
+
+    if len(sys.argv) != 8:
+        print("Usage %s h k l sg_with_spaces" % (sys.argv[0]))
+        print(len(sys.argv))
+        sys.exit()
+    try:
+        h = int(sys.argv[1])
+    except:
+        print("Sorry %s is not an integer\n" % sys.argv[1])
+        sys.exit()
+    try:
+        k = int(sys.argv[2])
+    except:
+        print("Sorry %s is not an integer\n" % sys.argv[2])
+        sys.exit()
+    try:
+        l = int(sys.argv[3])
+    except:
+        print("Sorry %s is not an integer\n" % sys.argv[3])
+        sys.exit()
+
+    sgstring = sys.argv[4] + " " + sys.argv[5] + " " + sys.argv[6] + " " + sys.argv[7]
+    absent = test_absence(h, k, l, sgstring)
+    if absent is True:
+        print("The ", h, k, l, "reflection is absent in space group " + sgstring)
+    else:
+        print("The ", h, k, l, "reflection is present in space group " + sgstring)
diff --git a/ImageD11/tkGui/guiindexer.py b/ImageD11/tkGui/guiindexer.py
index 57c0ee55..6d9e66da 100644
--- a/ImageD11/tkGui/guiindexer.py
+++ b/ImageD11/tkGui/guiindexer.py
@@ -1,5 +1,3 @@
-
-
 from __future__ import print_function
 # ImageD11_v0.4 Software for beamline ID11
@@ -31,6 +29,7 @@ from .listdialog import listdialog
 from .
import twodplot import threading + try: import Tkinter as Tk except: @@ -40,27 +39,27 @@ class run_idx: def __init__(self, parent, idxer): self.top = Tk.Toplevel(parent) - self.label = Tk.Label(self.top, text="Found so far %8d"%(0)) + self.label = Tk.Label(self.top, text="Found so far %8d" % (0)) self.label.pack() self.btn = Tk.Button(self.top, text="Stop", command=self.stop) self.btn.pack() self.idxer = idxer - self.thr = threading.Thread( target=self.idxer.score_all_pairs ) + self.thr = threading.Thread(target=self.idxer.score_all_pairs) self.thr.start() self.top.after(1000, self.update) def stop(self): - self.label.configure(text="Got a stop, so far %8d"%(len(self.idxer.ubis))) - self.btn.configure(text='Stopping') - self.idxer.stop=True + self.label.configure(text="Got a stop, so far %8d" % (len(self.idxer.ubis))) + self.btn.configure(text="Stopping") + self.idxer.stop = True self.thr.join() self.top.destroy() def update(self): - self.label.configure( text="Tested %8d of %8d\nFound so far %8d"%( - self.idxer.tried, - self.idxer.npairs, - len(self.idxer.ubis)) ) + self.label.configure( + text="Tested %8d of %8d\nFound so far %8d" + % (self.idxer.tried, self.idxer.npairs, len(self.idxer.ubis)) + ) if self.idxer.tried == self.idxer.npairs: self.top.destroy() else: @@ -68,105 +67,111 @@ def update(self): class guiindexer: - - def __init__(self,parent): + def __init__(self, parent): """ Parent (arg) is a hook to features of the parent gui sets up indexing menuitems """ - self.parent=parent -# peaks are in self.parent.finalpeaks - self.menuitems = ( "Indexing", 0, - [ ( "Load g-vectors", 0, self.loadgv), - ( "Plot x/y/z", 5, self.plotxyz), - ( "Load parameters", 1, self.loadfileparameters), - ( "Edit parameters", 0, self.editparameters), - ( "Assign peaks to powder rings", 0, self.assignpeaks), - ( "Make Friedel pair file", 5, self.makefriedel), - ( "Generate trial orientations",0, self.find), - ( "Score trial orientations",0, self.scorethem), - ( "Auto-find",2, self.autofind), - ( "Histogram fit quality",0, self.histogram_drlv_fit), - ( "Save parameters", 0, self.saveparameters), - ( "Save UBI matrices", 5, self.saveubis), - ( "Write out indexed peaks",0,self.saveindexing), - ( "Reset indexer",0,self.reset), - ] ) - self.plot3d=None + self.parent = parent + # peaks are in self.parent.finalpeaks + self.menuitems = ( + "Indexing", + 0, + [ + ("Load g-vectors", 0, self.loadgv), + ("Plot x/y/z", 5, self.plotxyz), + ("Load parameters", 1, self.loadfileparameters), + ("Edit parameters", 0, self.editparameters), + ("Assign peaks to powder rings", 0, self.assignpeaks), + ("Make Friedel pair file", 5, self.makefriedel), + ("Generate trial orientations", 0, self.find), + ("Score trial orientations", 0, self.scorethem), + ("Auto-find", 2, self.autofind), + ("Histogram fit quality", 0, self.histogram_drlv_fit), + ("Save parameters", 0, self.saveparameters), + ("Save UBI matrices", 5, self.saveubis), + ("Write out indexed peaks", 0, self.saveindexing), + ("Reset indexer", 0, self.reset), + ], + ) + self.plot3d = None def autofind(self): self.parent.guicommander.commandscript += "myindexer.score_all_pairs()\n" - run_idx( self.parent, self.parent.guicommander.objects['indexer']) - + run_idx(self.parent, self.parent.guicommander.objects["indexer"]) def loadgv(self): - """ see indexing.readgvfile """ - filename=self.parent.opener.show( + """see indexing.readgvfile""" + filename = self.parent.opener.show( title="File containing g-vectors", - filetypes=[ ("Gvector files", "*.gve"), - ("Gvector files", "*.gv") 
] ) - self.parent.guicommander.execute("indexer","readgvfile",filename) + filetypes=[("Gvector files", "*.gve"), ("Gvector files", "*.gv")], + ) + self.parent.guicommander.execute("indexer", "readgvfile", filename) def saveubis(self): - """ see indexing.saveubis """ - filename=self.parent.saver.show(title="File to save UBIS") - self.parent.guicommander.execute("indexer","saveubis",filename) + """see indexing.saveubis""" + filename = self.parent.saver.show(title="File to save UBIS") + self.parent.guicommander.execute("indexer", "saveubis", filename) def makefriedel(self): - """ see indexing.friedelpairs """ - filename=self.parent.saver.show(title="File to save Friedelpairs") - self.parent.guicommander.execute("indexer","friedelpairs",filename) + """see indexing.friedelpairs""" + filename = self.parent.saver.show(title="File to save Friedelpairs") + self.parent.guicommander.execute("indexer", "friedelpairs", filename) def scorethem(self): - """ see indexing.scorethem """ - self.parent.guicommander.execute("indexer","scorethem") + """see indexing.scorethem""" + self.parent.guicommander.execute("indexer", "scorethem") def histogram_drlv_fit(self): """ Calls indexer.histogram_drlv_fit Plots indexer.bins versus indexer.histogram """ - self.parent.guicommander.execute("indexer","histogram_drlv_fit") - x=self.parent.guicommander.getdata("indexer","bins") - y=self.parent.guicommander.getdata("indexer","histogram") - self.parent.twodplotter.plotitems={} # clears plot + self.parent.guicommander.execute("indexer", "histogram_drlv_fit") + x = self.parent.guicommander.getdata("indexer", "bins") + y = self.parent.guicommander.getdata("indexer", "histogram") + self.parent.twodplotter.plotitems = {} # clears plot import matplotlib.cm + for yline in range(y.shape[0]): - color = matplotlib.cm.jet( yline*1.0/y.shape[0] ) - print("yline, color",yline,color) - self.parent.twodplotter.plotitems["drlv histogram"+str(yline)]=twodplot.data( - x[1:],y[yline,:], - {"xlabel" : "drlv", - "ylabel" : "freq", - "title" : "drlv histogram", - 'plotopts' : { "linestyle" : "-", - "marker" : "o", - "markersize" : 1, - "alpha" : 0.8, - "color" : color, - } - } - ) # data + color = matplotlib.cm.jet(yline * 1.0 / y.shape[0]) + print("yline, color", yline, color) + self.parent.twodplotter.plotitems[ + "drlv histogram" + str(yline) + ] = twodplot.data( + x[1:], + y[yline, :], + { + "xlabel": "drlv", + "ylabel": "freq", + "title": "drlv histogram", + "plotopts": { + "linestyle": "-", + "marker": "o", + "markersize": 1, + "alpha": 0.8, + "color": color, + }, + }, + ) # data self.parent.twodplotter.replot() - def assignpeaks(self): - """ see indexing.assigntorings """ - self.parent.guicommander.execute("indexer","assigntorings") + """see indexing.assigntorings""" + self.parent.guicommander.execute("indexer", "assigntorings") def loadfileparameters(self): - """ see indexing.loadpars and parameters.loadpars """ - filename=self.parent.opener.show( + """see indexing.loadpars and parameters.loadpars""" + filename = self.parent.opener.show( title="File containing indexing parameters", - filetypes = [ ("Parameter files", "*.prm"), - ("Parameter files", "*.par") ] ) - self.parent.guicommander.execute("indexer","loadpars",filename) + filetypes=[("Parameter files", "*.prm"), ("Parameter files", "*.par")], + ) + self.parent.guicommander.execute("indexer", "loadpars", filename) def saveparameters(self): - """ see indexing.savepars and parameters.savepars """ - filename=self.parent.saver.show(title="File to save indexing parameters") - 
self.parent.guicommander.execute("indexer","savepars",filename) - + """see indexing.savepars and parameters.savepars""" + filename = self.parent.saver.show(title="File to save indexing parameters") + self.parent.guicommander.execute("indexer", "savepars", filename) def editparameters(self): """ @@ -178,15 +183,16 @@ def editparameters(self): eg : loadpars(None) """ # First make the indexer update its parameters object - self.parent.guicommander.execute("indexer","updateparameters") # no filename arg + self.parent.guicommander.execute( + "indexer", "updateparameters" + ) # no filename arg # Now borrow a copy to read them and edit - pars = self.parent.guicommander.getdata("indexer","pars") - d=listdialog(self.parent,items=pars,title="Indexing parameters") - self.parent.guicommander.execute("indexer","parameterobj.set_parameters",d.result) - self.parent.guicommander.execute("indexer","loadpars") # and use them - - - + pars = self.parent.guicommander.getdata("indexer", "pars") + d = listdialog(self.parent, items=pars, title="Indexing parameters") + self.parent.guicommander.execute( + "indexer", "parameterobj.set_parameters", d.result + ) + self.parent.guicommander.execute("indexer", "loadpars") # and use them def plotxyz(self): """ @@ -194,33 +200,33 @@ def plotxyz(self): Plots the x,y,z (gv) array in a 3D opengl window """ import logging + try: from . import plot3d except: import traceback + traceback.print_last() logging.warning("You might have a PyOpenGl problem??") return - gv = self.parent.guicommander.getdata("indexer","gv") + gv = self.parent.guicommander.getdata("indexer", "gv") if gv is not None: - if self.plot3d==None: - self.plot3d = plot3d.plot3d(self.parent,gv) + if self.plot3d == None: + self.plot3d = plot3d.plot3d(self.parent, gv) self.plot3d.go() logging.debug("self.plot3d " + str(self.plot3d)) else: self.plot3d.changedata(gv) def find(self): - """ see indexing.find """ - self.parent.guicommander.execute("indexer","find") + """see indexing.find""" + self.parent.guicommander.execute("indexer", "find") def saveindexing(self): - """ see indexing.saveindexing """ - filename=self.parent.saver.show(title="File to save indexing output") - self.parent.guicommander.execute("indexer","saveindexing",filename) - + """see indexing.saveindexing""" + filename = self.parent.saver.show(title="File to save indexing output") + self.parent.guicommander.execute("indexer", "saveindexing", filename) def reset(self): - """ see indexing.reset """ - self.parent.guicommander.execute("indexer","reset") - + """see indexing.reset""" + self.parent.guicommander.execute("indexer", "reset") diff --git a/ImageD11/tkGui/guimaker.py b/ImageD11/tkGui/guimaker.py index 27e1d9e0..c052f36a 100644 --- a/ImageD11/tkGui/guimaker.py +++ b/ImageD11/tkGui/guimaker.py @@ -1,4 +1,3 @@ - """ Script to put the menus together and build an appli with each bit being relatively clutterfree @@ -14,36 +13,42 @@ try: import Tkinter as Tk except: - # python 3 ? + # python 3 ? 
     import tkinter as Tk


-class GuiMaker(Tk.Frame): # Inherit from Tk frame
+
+class GuiMaker(Tk.Frame):  # Inherit from Tk frame
     """
     You must inherit from this class
     and implement the start and makeWidgets methods
     """
-    menuBar=[]
-    def __init__(self,parent=None):
-        Tk.Frame.__init__(self,parent)
+
+    menuBar = []
+
+    def __init__(self, parent=None):
+        Tk.Frame.__init__(self, parent)
         self.pack(expand=Tk.YES, fill=Tk.BOTH)
         import ImageD11
-        self.statusbar=Tk.Label(self,text="Welcome to ImageD11 version "+ImageD11.__version__)
+
+        self.statusbar = Tk.Label(
+            self, text="Welcome to ImageD11 version " + ImageD11.__version__
+        )
         self.statusbar.pack(side=Tk.BOTTOM)
         self.start()
         self.makeWidgets()
         self.makeMenuBar()

     def makeMenuBar(self):
-        menubar = Tk.Menu(self.master,tearoff=0)
+        menubar = Tk.Menu(self.master, tearoff=0)
         self.master.config(menu=menubar)
         for (name, key, items) in self.menuBar:
             pulldown = Tk.Menu(menubar)
-            self.addMenuItems(pulldown,items)
+            self.addMenuItems(pulldown, items)
             menubar.add_cascade(label=name, underline=key, menu=pulldown)

     def addMenuItems(self, menu, items):
         for item in items:
-            if item=="separator":
+            if item == "separator":
                 menu.add_separator({})
             else:
-                menu.add_command(label = item[0], underline = item[1], command=item[2] )
+                menu.add_command(label=item[0], underline=item[1], command=item[2])
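+
+
+# A minimal subclass sketch (illustrative addition, not in the original
+# module). GuiMaker calls start() and makeWidgets() from __init__ and then
+# builds the menus from self.menuBar, so a subclass only needs:
+#
+# class MyApp(GuiMaker):
+#     def start(self):
+#         self.menuBar = [("File", 0, [("Quit", 0, self.master.destroy)])]
+#
+#     def makeWidgets(self):
+#         Tk.Label(self, text="hello").pack()
+#
+# root = Tk.Tk()
+# MyApp(root).mainloop()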
diff --git a/ImageD11/tkGui/guipeaksearch.py b/ImageD11/tkGui/guipeaksearch.py
index 3ba08fb1..0b7e8b61 100644
--- a/ImageD11/tkGui/guipeaksearch.py
+++ b/ImageD11/tkGui/guipeaksearch.py
@@ -1,5 +1,3 @@
-
-
 # ImageD11_v0.4 Software for beamline ID11
 # Copyright (C) 2005 Jon Wright
 #
@@ -34,27 +32,36 @@ class guipeaksearcher:
     All communication should be via parent guicommander object
     """
-    def __init__(self,parent,quiet="No"):
+
+    def __init__(self, parent, quiet="No"):
         """
         Parent is a hook to features of the parent gui
         sets up peakmerging menu items + message for searching
         """
-        self.parent=parent
-        self.quiet=quiet
-        #print "I am in quiet mode",quiet
-        self.menuitems = ( "PeakSearching", 4,
-                           [ ( "Search raw images" , 0, self.searchraw ),
-                             ( "Read pks file", 0, self.readpeaks ),
-                             ( "Harvest peaks", 0, self.harvestpeaks),
-                             ( "Merge peaks", 0, self.mergepeaks ),
-                             ( "Filter peaks", 0, self.filter ),
-                             ( "Save good peaks",2, self.savepeaks) ] )
+        self.parent = parent
+        self.quiet = quiet
+        # print "I am in quiet mode",quiet
+        self.menuitems = (
+            "PeakSearching",
+            4,
+            [
+                ("Search raw images", 0, self.searchraw),
+                ("Read pks file", 0, self.readpeaks),
+                ("Harvest peaks", 0, self.harvestpeaks),
+                ("Merge peaks", 0, self.mergepeaks),
+                ("Filter peaks", 0, self.filter),
+                ("Save good peaks", 2, self.savepeaks),
+            ],
+        )

     def searchraw(self):
-        """ Explains to user about the command line script """
-        showinfo("Sorry","""Not implemented for gui, please use the jonpeaksearch script on amber/crunch for now""")
+        """Explains to user about the command line script"""
+        showinfo(
+            "Sorry",
+            """Not implemented for gui, please use the jonpeaksearch script on amber/crunch for now""",
+        )

-    def readpeaks(self,filename=None):
+    def readpeaks(self, filename=None):
         """
         Runs peakmerger.readpeaks
         gets names for first and last image for plot title
         """
         if filename is None:
             filename = self.parent.opener.show(title="Peaksearch results file")
         import os
+
         if os.path.isfile(filename):
             # apply this to peakmerge
-            self.parent.guicommander.execute("peakmerger","readpeaks",filename)
+            self.parent.guicommander.execute("peakmerger", "readpeaks", filename)
         else:
-            showinfo("Sorry, bad filename %s"%(filename))
+            showinfo("Sorry", "bad filename %s" % (filename))
-        imagenumbers = self.parent.guicommander.getdata("peakmerger","imagenumbers")
-        omegas = self.parent.guicommander.getdata("peakmerger","omegas")
-        images = self.parent.guicommander.getdata("peakmerger","images")
+        imagenumbers = self.parent.guicommander.getdata("peakmerger", "imagenumbers")
+        omegas = self.parent.guicommander.getdata("peakmerger", "omegas")
+        images = self.parent.guicommander.getdata("peakmerger", "images")
         first = images[0].name
         last = images[-1].name
         self.parent.twodplotter.adddata(
-            ("number versus omega",
-             twodplot.data(
-            imagenumbers,omegas,
-            {"xlabel" : "imagenumber",
-             "ylabel" : "Omega",
-             "title" : first+"..."+last }
-            ) # data
-            )# tuple to plotter
-            ) # call
+            (
+                "number versus omega",
+                twodplot.data(
+                    imagenumbers,
+                    omegas,
+                    {
+                        "xlabel": "imagenumber",
+                        "ylabel": "Omega",
+                        "title": first + "..." + last,
+                    },
+                ),  # data
+            )  # tuple to plotter
+        )  # call

     def harvestpeaks(self):
@@ -91,59 +103,74 @@ def harvestpeaks(self):
         calls peakmerger.harvestpeaks(image_number_range,omega_range)
         """
         # Now we need to select the range of peaks to use
-        if self.quiet=="No":
-            ans = askyesno("Have you selected a sensible range of images?","""
+        if self.quiet == "No":
+            ans = askyesno(
+                "Have you selected a sensible range of images?",
+                """
 Use the mouse to select the range of image numbers and omega angles
 that you want to use from the graph on the screen. If all is OK, then
 say yes now and we will try to harvest the peaks. Otherwise, say no,
 select the right range and come back "harvestpeaks" again
-            """ )
+            """,
+            )
             # FIXME? ans is ignored anyway
         # We now have the ranges in imagenumber and omega from
         # the graph
-        numlim=self.parent.twodplotter.a.get_xlim()
-        omlim=self.parent.twodplotter.a.get_ylim()
-        self.parent.guicommander.execute("peakmerger","harvestpeaks",numlim=numlim,omlim=omlim)
-        if self.quiet=="No":
-            npks = len(self.parent.guicommander.getdata("peakmerger","allpeaks") )
-            showinfo("Harvested peaks","You have a total of %d peaks,"%(npks)+
-                     "no peaks have been merged")
+        numlim = self.parent.twodplotter.a.get_xlim()
+        omlim = self.parent.twodplotter.a.get_ylim()
+        self.parent.guicommander.execute(
+            "peakmerger", "harvestpeaks", numlim=numlim, omlim=omlim
+        )
+        if self.quiet == "No":
+            npks = len(self.parent.guicommander.getdata("peakmerger", "allpeaks"))
+            showinfo(
+                "Harvested peaks",
+                "You have a total of %d peaks," % (npks) + " no peaks have been merged",
+            )

     def mergepeaks(self):
-        """ calls peakmerger.mergepeaks and reports number of peaks to user """
-        self.parent.guicommander.execute("peakmerger","mergepeaks")
-        nmerged = len(self.parent.guicommander.getdata("peakmerger","merged"))
-        if self.quiet=="No":
-            showinfo("Finished merging peaks","You have a total of "+str(nmerged)+" after merging")
+        """calls peakmerger.mergepeaks and reports number of peaks to user"""
+        self.parent.guicommander.execute("peakmerger", "mergepeaks")
+        nmerged = len(self.parent.guicommander.getdata("peakmerger", "merged"))
+        if self.quiet == "No":
+            showinfo(
+                "Finished merging peaks",
+                "You have a total of " + str(nmerged) + " after merging",
+            )
         return

     def filter(self):
-        """ calls peakmerger.filter (does very little)
+        """calls peakmerger.filter (does very little)
         plots x and y final peak positions
         TODO implement filters!!!
""" - self.parent.guicommander.execute("peakmerger","filter") - peaks = self.parent.guicommander.getdata("peakmerger","finalpeaks") - if self.quiet=="No": - self.parent.twodplotter.hideall() # get rid of image number versus omega plot + self.parent.guicommander.execute("peakmerger", "filter") + peaks = self.parent.guicommander.getdata("peakmerger", "finalpeaks") + if self.quiet == "No": + self.parent.twodplotter.hideall() # get rid of image number versus omega plot self.parent.twodplotter.adddata( - ( "Filtered peaks", - twodplot.data( - peaks[0,:], - peaks[1,:], - {"xlabel" : "x", - "ylabel" : "y", - "title" : "Peak positions on detector"} - ) ) ) + ( + "Filtered peaks", + twodplot.data( + peaks[0, :], + peaks[1, :], + { + "xlabel": "x", + "ylabel": "y", + "title": "Peak positions on detector", + }, + ), + ) + ) return "nothing" # Need to filter based on x,y # also based on intensity # also based on shape - def savepeaks(self,filename=None): - """ see peakmerger.savepeaks """ - if filename==None: - filename=self.parent.saver.show(title="Filtered peak positions") - self.parent.guicommander.execute("peakmerger","savepeaks",filename) + def savepeaks(self, filename=None): + """see peakmerger.savepeaks""" + if filename == None: + filename = self.parent.saver.show(title="Filtered peak positions") + self.parent.guicommander.execute("peakmerger", "savepeaks", filename) diff --git a/ImageD11/tkGui/guisolver.py b/ImageD11/tkGui/guisolver.py index fdcec572..909650bc 100644 --- a/ImageD11/tkGui/guisolver.py +++ b/ImageD11/tkGui/guisolver.py @@ -1,4 +1,3 @@ - # Get Strain/Stress from ImageD11 UBI/map files # Copyright (C) 2015 Younes ELHACHI # @@ -29,49 +28,52 @@ class guisolver: - - def __init__(self,parent): + def __init__(self, parent): """ Parent (arg) is a hook to features of the parent gui sets up eps_sig_solver menuitems """ - self.parent=parent - - self.menuitems = ( "Strain/Stress", 0, - [ ( "Load ubis", 0, self.loadubis), - ( "Load parameters", 1, self.loadfileparameters), - ( "Edit parameters", 0, self.editparameters), - ( "Compute and save strain and stress", 0, self.compute_save_epsig), - ( "Save parameters", 0, self.saveparameters) - ] ) - + self.parent = parent + + self.menuitems = ( + "Strain/Stress", + 0, + [ + ("Load ubis", 0, self.loadubis), + ("Load parameters", 1, self.loadfileparameters), + ("Edit parameters", 0, self.editparameters), + ("Compute and save strain and stress", 0, self.compute_save_epsig), + ("Save parameters", 0, self.saveparameters), + ], + ) def loadubis(self): - """ see eps_sig_solver.loadmap """ - filename=self.parent.opener.show( + """see eps_sig_solver.loadmap""" + filename = self.parent.opener.show( title="File containing ubi matrices", - filetypes=[ ("Grain map files", "*.map"), - ("UBI files", "*.ubi") ] ) - self.parent.guicommander.execute("solver","loadmap",filename) + filetypes=[("Grain map files", "*.map"), ("UBI files", "*.ubi")], + ) + self.parent.guicommander.execute("solver", "loadmap", filename) def compute_save_epsig(self): - """ see eps_sig_solver.compute_eps_sig """ - filename=self.parent.saver.show(title="File to save strain and stress") - self.parent.guicommander.execute("solver","compute_write_eps_sig",filename) + """see eps_sig_solver.compute_eps_sig""" + filename = self.parent.saver.show(title="File to save strain and stress") + self.parent.guicommander.execute("solver", "compute_write_eps_sig", filename) def loadfileparameters(self): - """ see eps_sig_solver.loadpars and parameters.loadpars """ - filename=self.parent.opener.show( + """see 
eps_sig_solver.loadpars and parameters.loadpars""" + filename = self.parent.opener.show( title="File containing unit cell and elastic constants", - filetypes = [ ("Parameter files", "*.prm"), - ("Parameter files", "*.par") ] ) - self.parent.guicommander.execute("solver","loadpars",filename) + filetypes=[("Parameter files", "*.prm"), ("Parameter files", "*.par")], + ) + self.parent.guicommander.execute("solver", "loadpars", filename) def saveparameters(self): - """ see eps_sig_solver.savepars and parameters.savepars """ - filename=self.parent.saver.show(title="File to save unit cell and elastic constants") - self.parent.guicommander.execute("solver","savepars",filename) - + """see eps_sig_solver.savepars and parameters.savepars""" + filename = self.parent.saver.show( + title="File to save unit cell and elastic constants" + ) + self.parent.guicommander.execute("solver", "savepars", filename) def editparameters(self): """ @@ -83,10 +85,13 @@ def editparameters(self): eg : loadpars(None) """ # First make the eps_sig_solver update its parameters object - self.parent.guicommander.execute("solver","updateparameters") # no filename arg + self.parent.guicommander.execute( + "solver", "updateparameters" + ) # no filename arg # Now borrow a copy to read them and edit - pars = self.parent.guicommander.getdata("solver","pars") - d=listdialog(self.parent,items=pars,title="unit cell and elastic constants") - self.parent.guicommander.execute("solver","parameterobj.set_parameters",d.result) - self.parent.guicommander.execute("solver","loadpars") # and use them - + pars = self.parent.guicommander.getdata("solver", "pars") + d = listdialog(self.parent, items=pars, title="unit cell and elastic constants") + self.parent.guicommander.execute( + "solver", "parameterobj.set_parameters", d.result + ) + self.parent.guicommander.execute("solver", "loadpars") # and use them diff --git a/ImageD11/tkGui/guitransformer.py b/ImageD11/tkGui/guitransformer.py index 5e45462e..952da8f2 100644 --- a/ImageD11/tkGui/guitransformer.py +++ b/ImageD11/tkGui/guitransformer.py @@ -1,4 +1,3 @@ - from __future__ import print_function # ImageD11_v0.4 Software for beamline ID11 @@ -20,9 +19,10 @@ import numpy as np -#try: + +# try: # from Tkinter import * -#except: +# except: # # python3? 
# from tkinter import * @@ -32,120 +32,128 @@ import logging -class guitransformer: - def __init__(self,parent,quiet="No"): +class guitransformer: + def __init__(self, parent, quiet="No"): """ Parent is a hook to features of the parent gui """ - self.quiet=quiet - self.parent=parent - self.menuitems = ( "Transformation", 0, - [ ( "Load filtered peaks", 0, self.loadfiltered), - ( "Plot y/z", 5, self.plotyz ), - ( "Load parameters", 1, self.loadfileparameters), - ( "Edit parameters", 0, self.editparameters), - ( "Plot tth/eta", 0, self.plotreta ), - ( "Add unit cell peaks",0, self.addcellpeaks), - ( "Fit",0, self.fit), - ( "Save parameters", 0, self.saveparameters), -# ( "Plot selected columns", 0, self.plotcols), - ( "Plot tth histogram", 0, self.plothisto ), - ( "Export tth histogram", 0, self.savehisto ), - ( "Filter peaks based on tth histogram", 0, self.filterhisto ), - ( "Compute g-vectors", 0, self.computegv), - ( "Save g-vectors", 0, self.savegv), - ( "Save new colfile", 0, self.savecolfile), - ( "Write graindex finalpeaks.log",0, self.write_graindex_gv), - ( "Write pyFAI data file", 0, self.write_pyFAI) - ] ) + self.quiet = quiet + self.parent = parent + self.menuitems = ( + "Transformation", + 0, + [ + ("Load filtered peaks", 0, self.loadfiltered), + ("Plot y/z", 5, self.plotyz), + ("Load parameters", 1, self.loadfileparameters), + ("Edit parameters", 0, self.editparameters), + ("Plot tth/eta", 0, self.plotreta), + ("Add unit cell peaks", 0, self.addcellpeaks), + ("Fit", 0, self.fit), + ("Save parameters", 0, self.saveparameters), + # ( "Plot selected columns", 0, self.plotcols), + ("Plot tth histogram", 0, self.plothisto), + ("Export tth histogram", 0, self.savehisto), + ("Filter peaks based on tth histogram", 0, self.filterhisto), + ("Compute g-vectors", 0, self.computegv), + ("Save g-vectors", 0, self.savegv), + ("Save new colfile", 0, self.savecolfile), + ("Write graindex finalpeaks.log", 0, self.write_graindex_gv), + ("Write pyFAI data file", 0, self.write_pyFAI), + ], + ) def loadfiltered(self): - filename=self.parent.opener.show(title= - "File containing filtered peaks", - filetypes=[("filtered peaks", "*.flt"), - ("All Files ", "*")]) - self.parent.guicommander.execute("transformer", - "loadfiltered",filename) + filename = self.parent.opener.show( + title="File containing filtered peaks", + filetypes=[("filtered peaks", "*.flt"), ("All Files ", "*")], + ) + self.parent.guicommander.execute("transformer", "loadfiltered", filename) def loadfileparameters(self): - filename=self.parent.opener.show(title= - "File containing detector parameters", - filetypes=[("parameter files", "*.par"), - ("parameter files", "*.pars"), - ("parameter files", "*.prm"), - ("All Files ", "*")]) - self.parent.guicommander.execute("transformer", - "loadfileparameters",filename) - - def saveparameters(self,filename=None): - if filename==None: - filename=self.parent.saver.show(title= - "File to save detector parameters") - self.parent.guicommander.execute("transformer", - "saveparameters",filename) - + filename = self.parent.opener.show( + title="File containing detector parameters", + filetypes=[ + ("parameter files", "*.par"), + ("parameter files", "*.pars"), + ("parameter files", "*.prm"), + ("All Files ", "*"), + ], + ) + self.parent.guicommander.execute("transformer", "loadfileparameters", filename) + + def saveparameters(self, filename=None): + if filename == None: + filename = self.parent.saver.show(title="File to save detector parameters") + self.parent.guicommander.execute("transformer", 
"saveparameters", filename) def editparameters(self): """ Gets a copy of the parameter object Allows user to edit parameters """ - self.parent.guicommander.execute("transformer","updateparameters") - pars = self.parent.guicommander.getdata("transformer","pars") - vars = self.parent.guicommander.execute("transformer","getvars") - possvars = self.parent.guicommander.execute("transformer", - "get_variable_list") - logging.debug("possible variables "+str(possvars)) + self.parent.guicommander.execute("transformer", "updateparameters") + pars = self.parent.guicommander.getdata("transformer", "pars") + vars = self.parent.guicommander.execute("transformer", "getvars") + possvars = self.parent.guicommander.execute("transformer", "get_variable_list") + logging.debug("possible variables " + str(possvars)) # wtf? logic = {} for v in possvars: if v in vars: - logic[v]=1 + logic[v] = 1 else: - logic[v]=0 - logging.debug("transformer pars: %s"% (str(pars))) - d = listdialog(self.parent,items=pars,title="Detector parameters", - logic=logic) - self.parent.guicommander.execute("transformer", - "parameterobj.set_parameters", - d.result) + logic[v] = 0 + logging.debug("transformer pars: %s" % (str(pars))) + d = listdialog( + self.parent, items=pars, title="Detector parameters", logic=logic + ) + self.parent.guicommander.execute( + "transformer", "parameterobj.set_parameters", d.result + ) # wtf d.fv vars = [] - print("d.fv",d.fv) + print("d.fv", d.fv) for v in possvars: - logging.debug(str(v)+" "+str(d.fv[v])) - if d.fv[v]==1: + logging.debug(str(v) + " " + str(d.fv[v])) + if d.fv[v] == 1: vars.append(v) - logging.debug("vars: "+str(vars)) - self.parent.guicommander.execute("transformer", - "parameterobj.set_varylist",vars) + logging.debug("vars: " + str(vars)) + self.parent.guicommander.execute( + "transformer", "parameterobj.set_varylist", vars + ) def plotyz(self): """ Plots the x,y arrays being used """ - xname = self.parent.guicommander.getdata("transformer","xname") - yname = self.parent.guicommander.getdata("transformer","yname") - x = self.parent.guicommander.execute("transformer", - "getcolumn", xname ) - y = self.parent.guicommander.execute("transformer", - "getcolumn", yname ) + xname = self.parent.guicommander.getdata("transformer", "xname") + yname = self.parent.guicommander.getdata("transformer", "yname") + x = self.parent.guicommander.execute("transformer", "getcolumn", xname) + y = self.parent.guicommander.execute("transformer", "getcolumn", yname) self.parent.twodplotter.hideall() self.parent.twodplotter.adddata( - ( "Filtered peaks", - twodplot.data( - x, y, - { "xlabel" : xname, - "ylabel" : yname, - "title" : "Peak positions in array", - 'plotopts' : {'color':'g', - 'marker':'.', - 'markersize': 1, - 'linestyle' : 'none', - 'alpha':0.8} - } - ))) + ( + "Filtered peaks", + twodplot.data( + x, + y, + { + "xlabel": xname, + "ylabel": yname, + "title": "Peak positions in array", + "plotopts": { + "color": "g", + "marker": ".", + "markersize": 1, + "linestyle": "none", + "alpha": 0.8, + }, + }, + ), + ) + ) def chooseyz(self): """ @@ -154,179 +162,197 @@ def chooseyz(self): pass def plotcols(self): - names = self.parent.guicommander.execute("transformer","getcols") + names = self.parent.guicommander.execute("transformer", "getcols") print(names) d = columnchooser(self.parent, names) print(d.result) - def fit(self): tthmin = self.parent.twodplotter.a.get_xlim()[0] tthmax = self.parent.twodplotter.a.get_xlim()[1] - self.parent.guicommander.execute("transformer","fit",tthmin,tthmax) + 
self.parent.guicommander.execute("transformer", "fit", tthmin, tthmax) self.plotreta() def plotreta(self): - self.parent.guicommander.execute("transformer","compute_tth_eta") - tth = self.parent.guicommander.execute("transformer","getcolumn","tth") - eta = self.parent.guicommander.execute("transformer","getcolumn","eta") + self.parent.guicommander.execute("transformer", "compute_tth_eta") + tth = self.parent.guicommander.execute("transformer", "getcolumn", "tth") + eta = self.parent.guicommander.execute("transformer", "getcolumn", "eta") self.parent.twodplotter.adddata( - ( "2Theta/Eta", - twodplot.data( + ( + "2Theta/Eta", + twodplot.data( tth, eta, - {"xlabel":"TwoTheta / degrees", - "ylabel":"Azimuth / degrees", - "title" :"Peak positions", - 'plotopts' : {'color':'g', - 'marker':'.', - 'markersize': 1, - 'linestyle' : 'none', - 'alpha':0.8} -} - ))) - - def plothisto(self, nbins = None): + { + "xlabel": "TwoTheta / degrees", + "ylabel": "Azimuth / degrees", + "title": "Peak positions", + "plotopts": { + "color": "g", + "marker": ".", + "markersize": 1, + "linestyle": "none", + "alpha": 0.8, + }, + }, + ), + ) + ) + + def plothisto(self, nbins=None): if nbins is None: - nbins = self.parent.guicommander.execute("transformer", - "parameterobj.get", - "no_bins") - doweight = self.parent.guicommander.execute("transformer", - "parameterobj.get", - "weight_hist_intensities") - d = listdialog( self.parent, - items={"no_bins": nbins, "weight_hist_intensities": doweight}, - title="Histogram - no of bins") - - nbins = int(d.result['no_bins']) - doweight = int(d.result['weight_hist_intensities']) - - self.parent.guicommander.execute("transformer", - "parameterobj.set_parameters", - d.result) + nbins = self.parent.guicommander.execute( + "transformer", "parameterobj.get", "no_bins" + ) + doweight = self.parent.guicommander.execute( + "transformer", "parameterobj.get", "weight_hist_intensities" + ) + d = listdialog( + self.parent, + items={"no_bins": nbins, "weight_hist_intensities": doweight}, + title="Histogram - no of bins", + ) + + nbins = int(d.result["no_bins"]) + doweight = int(d.result["weight_hist_intensities"]) + + self.parent.guicommander.execute( + "transformer", "parameterobj.set_parameters", d.result + ) else: - self.parent.guicommander.execute("transformer", - "parameterobj.set", - "no_bins", nbins) + self.parent.guicommander.execute( + "transformer", "parameterobj.set", "no_bins", nbins + ) - bins, hist = self.parent.guicommander.execute("transformer", - "compute_tth_histo") + bins, hist = self.parent.guicommander.execute( + "transformer", "compute_tth_histo" + ) self.parent.twodplotter.adddata( - ( "2Theta/Eta", - twodplot.data( + ( + "2Theta/Eta", + twodplot.data( bins, hist, - {"xlabel":"TwoTheta / degrees", - "ylabel":"No in bin", - "title" :"TwoTheta histogram", - - } - ))) - - def savehisto(self, nbins = None): + { + "xlabel": "TwoTheta / degrees", + "ylabel": "No in bin", + "title": "TwoTheta histogram", + }, + ), + ) + ) + + def savehisto(self, nbins=None): if nbins is None: - nbins = self.parent.guicommander.execute("transformer", - "parameterobj.get", - "no_bins") - doweight = self.parent.guicommander.execute("transformer", - "parameterobj.get", - "weight_hist_intensities") - d = listdialog( self.parent, - items={"no_bins": nbins, "weight_hist_intensities": doweight}, - title="Histogram - no of bins") - - nbins = int(d.result['no_bins']) - doweight = int(d.result['weight_hist_intensities']) - - self.parent.guicommander.execute("transformer", - "parameterobj.set_parameters", 
- d.result) + nbins = self.parent.guicommander.execute( + "transformer", "parameterobj.get", "no_bins" + ) + doweight = self.parent.guicommander.execute( + "transformer", "parameterobj.get", "weight_hist_intensities" + ) + d = listdialog( + self.parent, + items={"no_bins": nbins, "weight_hist_intensities": doweight}, + title="Histogram - no of bins", + ) + + nbins = int(d.result["no_bins"]) + doweight = int(d.result["weight_hist_intensities"]) + + self.parent.guicommander.execute( + "transformer", "parameterobj.set_parameters", d.result + ) else: - self.parent.guicommander.execute("transformer", - "parameterobj.set", - "no_bins", nbins) - - bins, hist = self.parent.guicommander.execute("transformer", - "compute_tth_histo") - filename=self.parent.saver.show(title="File to save histogram") - self.parent.guicommander.execute("transformer","save_tth_his",filename,bins,hist) - + self.parent.guicommander.execute( + "transformer", "parameterobj.set", "no_bins", nbins + ) + + bins, hist = self.parent.guicommander.execute( + "transformer", "compute_tth_histo" + ) + filename = self.parent.saver.show(title="File to save histogram") + self.parent.guicommander.execute( + "transformer", "save_tth_his", filename, bins, hist + ) def filterhisto(self): - """ Call plot histo, then filter on it """ - nbins = self.parent.guicommander.execute("transformer", - "parameterobj.get", - "no_bins") - - min_bin_prob = self.parent.guicommander.execute("transformer", - "parameterobj.get", - "min_bin_prob") - - doweight = self.parent.guicommander.execute("transformer", - "parameterobj.get", - "weight_hist_intensities") - - d=listdialog(self.parent,items={ - "no_bins": nbins, - "min_bin_prob": min_bin_prob, "weight_hist_intensities": doweight}, - title="Histogram filter") - - - self.parent.guicommander.execute("transformer", - "parameterobj.set_parameters", - d.result) - - min_bin_prob = self.parent.guicommander.execute("transformer", - "parameterobj.get", - "min_bin_prob") + """Call plot histo, then filter on it""" + nbins = self.parent.guicommander.execute( + "transformer", "parameterobj.get", "no_bins" + ) + + min_bin_prob = self.parent.guicommander.execute( + "transformer", "parameterobj.get", "min_bin_prob" + ) + + doweight = self.parent.guicommander.execute( + "transformer", "parameterobj.get", "weight_hist_intensities" + ) + + d = listdialog( + self.parent, + items={ + "no_bins": nbins, + "min_bin_prob": min_bin_prob, + "weight_hist_intensities": doweight, + }, + title="Histogram filter", + ) + + self.parent.guicommander.execute( + "transformer", "parameterobj.set_parameters", d.result + ) + + min_bin_prob = self.parent.guicommander.execute( + "transformer", "parameterobj.get", "min_bin_prob" + ) self.plothisto(nbins) - self.parent.guicommander.execute("transformer", - "filter_min", - "tth_hist_prob", - min_bin_prob) - + self.parent.guicommander.execute( + "transformer", "filter_min", "tth_hist_prob", min_bin_prob + ) def addcellpeaks(self): - self.parent.guicommander.execute("transformer","addcellpeaks") - tth=self.parent.guicommander.getdata("transformer","theorytth") + self.parent.guicommander.execute("transformer", "addcellpeaks") + tth = self.parent.guicommander.getdata("transformer", "theorytth") self.parent.twodplotter.adddata( - ( "HKL peaks", - twodplot.data( - tth, - np.zeros(tth.shape[0]), - {'plotopts' : {'color':'r', - 'marker':'|', - 'markersize': 50, - 'linestyle' : 'none', - 'alpha':1.0} - } - ))) + ( + "HKL peaks", + twodplot.data( + tth, + np.zeros(tth.shape[0]), + { + "plotopts": { + "color": 
"r", + "marker": "|", + "markersize": 50, + "linestyle": "none", + "alpha": 1.0, + } + }, + ), + ) + ) def computegv(self): - self.parent.guicommander.execute("transformer","computegv") + self.parent.guicommander.execute("transformer", "computegv") def savegv(self): - filename=self.parent.saver.show(title="File to save gvectors") - self.parent.guicommander.execute("transformer","savegv",filename) + filename = self.parent.saver.show(title="File to save gvectors") + self.parent.guicommander.execute("transformer", "savegv", filename) def savecolfile(self): - filename=self.parent.saver.show(title="File to save newcolfile") - self.parent.guicommander.execute("transformer", - "write_colfile", - filename) - + filename = self.parent.saver.show(title="File to save newcolfile") + self.parent.guicommander.execute("transformer", "write_colfile", filename) def write_graindex_gv(self): - filename=self.parent.saver.show(title="File for graindex, try finalpeaks.log") - self.parent.guicommander.execute("transformer","write_graindex_gv",filename) - - + filename = self.parent.saver.show(title="File for graindex, try finalpeaks.log") + self.parent.guicommander.execute("transformer", "write_graindex_gv", filename) def write_pyFAI(self): tthmin = self.parent.twodplotter.a.get_xlim()[0] tthmax = self.parent.twodplotter.a.get_xlim()[1] - filename=self.parent.saver.show(title="File for pyFAI, try data.py") - self.parent.guicommander.execute("transformer","write_pyFAI",filename, - tthmin,tthmax) - + filename = self.parent.saver.show(title="File for pyFAI, try data.py") + self.parent.guicommander.execute( + "transformer", "write_pyFAI", filename, tthmin, tthmax + ) diff --git a/ImageD11/tkGui/listdialog.py b/ImageD11/tkGui/listdialog.py index 8905f94c..d71555ef 100644 --- a/ImageD11/tkGui/listdialog.py +++ b/ImageD11/tkGui/listdialog.py @@ -1,4 +1,3 @@ - from __future__ import print_function @@ -13,13 +12,13 @@ import tkinter as Tk - class listdialog(Tk.Toplevel): """ Dialog box for setting detector parameters Takes a list of strings and numbers """ - def __init__(self, parent, title = None, items=None, logic = None): + + def __init__(self, parent, title=None, items=None, logic=None): Tk.Toplevel.__init__(self, parent) self.transient(parent) if title: @@ -29,41 +28,41 @@ def __init__(self, parent, title = None, items=None, logic = None): self.parent = parent self.result = items body = Tk.Frame(self) - self.initial_focus = self.body(body,items,logic) + self.initial_focus = self.body(body, items, logic) body.pack(padx=5, pady=5) self.buttonbox() self.grab_set() if not self.initial_focus: self.initial_focus = self self.protocol("WM_DELETE_WINDOW", self.cancel) - self.geometry("+%d+%d" % (parent.winfo_rootx()+50, - parent.winfo_rooty()+50)) + self.geometry("+%d+%d" % (parent.winfo_rootx() + 50, parent.winfo_rooty() + 50)) self.initial_focus.focus_set() self.wait_window(self) - + def body(self, master, items, logic=None): # create dialog body. return widget that should have # initial focus. 
this method should be overridden - self.e=[] + self.e = [] if items is not None: - i=0 - keys=list(items.keys()) + i = 0 + keys = list(items.keys()) keys.sort() - self.keys=keys + self.keys = keys for key in keys: - Tk.Label(master,text=key).grid(row=i) - el=Tk.Entry(master) - el.insert(Tk.END,items[key]) - el.grid(row=i,column=1) + Tk.Label(master, text=key).grid(row=i) + el = Tk.Entry(master) + el.insert(Tk.END, items[key]) + el.grid(row=i, column=1) self.e.append(el) if logic is not None and key in logic: val = logic[key] self.logicvars[key] = Tk.IntVar() self.logicvars[key].set(val) - b=Tk.Checkbutton(master,text="Vary?", - variable=self.logicvars[key]) - b.grid(row=i,column=2) - i=i+1 + b = Tk.Checkbutton( + master, text="Vary?", variable=self.logicvars[key] + ) + b.grid(row=i, column=2) + i = i + 1 return self.e[0] def buttonbox(self): @@ -77,11 +76,12 @@ def buttonbox(self): self.bind("", self.ok) self.bind("", self.cancel) box.pack() + # # standard button semantics def ok(self, event=None): if not self.validate(): - self.initial_focus.focus_set() # put focus back + self.initial_focus.focus_set() # put focus back return self.withdraw() self.update_idletasks() @@ -92,22 +92,23 @@ def cancel(self, event=None): # put focus back to the parent window self.parent.focus_set() self.destroy() + # # command hooks def validate(self): - return 1 # override + return 1 # override def apply(self): - retdict={} - i=0 + retdict = {} + i = 0 self.fv = {} for item in self.e: k = self.keys[i] - retdict[k]=item.get() + retdict[k] = item.get() if self.logic is not None and k in self.logic: - self.fv[k]=self.logicvars[k].get() - i=i+1 - self.result=retdict + self.fv[k] = self.logicvars[k].get() + i = i + 1 + self.result = retdict print(self.result) @@ -116,6 +117,7 @@ class columnchooser(listdialog): Dialog box for setting detector parameters Takes a list of strings and numbers """ + def __init__(self, parent, items, title="Choose two columns"): Tk.Toplevel.__init__(self, parent) self.transient(parent) @@ -125,17 +127,15 @@ def __init__(self, parent, items, title="Choose two columns"): listbox1 = Tk.Listbox(body) listbox2 = Tk.Listbox(body) for i in items: - listbox1.insert(Tk.END,i) - listbox2.insert(Tk.END,i) + listbox1.insert(Tk.END, i) + listbox2.insert(Tk.END, i) body.pack(padx=5, pady=5) listbox1.pack() listbox2.pack() - self.initial_focus=body + self.initial_focus = body self.buttonbox() self.grab_set() self.protocol("WM_DELETE_WINDOW", self.cancel) - self.geometry("+%d+%d" % (parent.winfo_rootx()+50, - parent.winfo_rooty()+50)) + self.geometry("+%d+%d" % (parent.winfo_rootx() + 50, parent.winfo_rooty() + 50)) self.initial_focus.focus_set() self.wait_window(self) - diff --git a/ImageD11/tkGui/plot3d.py b/ImageD11/tkGui/plot3d.py index 95acd5c0..e079bf3e 100644 --- a/ImageD11/tkGui/plot3d.py +++ b/ImageD11/tkGui/plot3d.py @@ -6,14 +6,13 @@ from example by Tarn Weisner Burton in pyopengl """ -__author__ = 'Jon Wright from example by Tarn Weisner Burton ' +__author__ = "Jon Wright from example by Tarn Weisner Burton " import numpy import sys -import os from pyopengltk import Opengl import OpenGL.GL as GL -import OpenGL.GLU as GLU + if sys.version_info[0] < 3: import Tkinter as Tk else: @@ -22,165 +21,193 @@ class myOpengl(Opengl): - # Make a parallel projection # mostly copied from Tk.Opengl class with small mods def tkRedraw(self, *dummy): """Cause the opengl widget to redraw itself.""" - if not self.initialised: + if not self.initialised: return self.activate() - #print self.distance - 
GL.glPushMatrix() # Protect our matrix + # print self.distance + GL.glPushMatrix() # Protect our matrix self.update_idletasks() self.activate() w = self.winfo_width() h = self.winfo_height() GL.glViewport(0, 0, w, h) # Clear the background and depth buffer. - GL.glClearColor(self.r_back, self.g_back, self.b_back, 0.) + GL.glClearColor(self.r_back, self.g_back, self.b_back, 0.0) GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT) GL.glMatrixMode(GL.GL_PROJECTION) GL.glLoadIdentity() - r = 1.*w/h - GL.glOrtho( -self.distance*r, self.distance*r, -self.distance, self.distance, - -self.distance*3, self.distance*3) -# GLU.gluPerspective(self.fovy, float(w)/float(h), self.near, self.far) + r = 1.0 * w / h + GL.glOrtho( + -self.distance * r, + self.distance * r, + -self.distance, + self.distance, + -self.distance * 3, + self.distance * 3, + ) + # GLU.gluPerspective(self.fovy, float(w)/float(h), self.near, self.far) GL.glMatrixMode(GL.GL_MODELVIEW) self.redraw(self) - GL.glFlush() # Tidy up - GL.glPopMatrix() # Restore the matrix -# self.tk.call(self._w, 'swapbuffers') + GL.glFlush() # Tidy up + GL.glPopMatrix() # Restore the matrix + # self.tk.call(self._w, 'swapbuffers') self.tkSwapBuffers() - class plot3d(Tk.Toplevel): - def __init__(self,parent,data=None,lines=None, - ubis=None,image=None,pars=None,spline=None): + def __init__( + self, + parent, + data=None, + lines=None, + ubis=None, + image=None, + pars=None, + spline=None, + ): """ Data would be your observed g-vectors. Lines will be a computed lattice """ - Tk.Toplevel.__init__(self,parent) - self.parent=parent + Tk.Toplevel.__init__(self, parent) + self.parent = parent if data is not None: - xyz=data.copy() + xyz = data.copy() else: - xyz=numpy.array([0,0,0]) - self.ps=Tk.StringVar() - self.ps.set('1.') - self.pointsize=1. - self.npeaks=xyz.shape[0] + xyz = numpy.array([0, 0, 0]) + self.ps = Tk.StringVar() + self.ps.set("1.") + self.pointsize = 1.0 + self.npeaks = xyz.shape[0] - self.o = myOpengl(self, width = 400, height = 400) + self.o = myOpengl(self, width=400, height=400) self.o.redraw = self.redraw self.o.autospin_allowed = 1 - self.o.fovy=5 - self.o.near=1e6 - self.o.far=1e-6 - import math - self.o.distance=3. 
-#numpy.maximum.reduce(numpy.ravel(xyz))*4 / \ -# math.tan(self.o.fovy*math.pi/180) - print(type(xyz),xyz.dtype.char,xyz.shape) - self.xyz=xyz - f=Tk.Frame(self) - Tk.Button(f,text="Help",command=self.o.help).pack(side=Tk.LEFT) - Tk.Button(f,text="Reset",command=self.o.reset).pack(side=Tk.LEFT) - Tk.Button(f,text="Pointsize",command=self.setps).pack(side=Tk.LEFT) - Tk.Entry(f,textvariable=self.ps).pack(side=Tk.LEFT) - Tk.Button(f,text="Quit",command=self.goaway).pack(side=Tk.RIGHT) - self.dataoff=0 - self.o.pack(side = 'top', expand = 1, fill = 'both') - f.pack(side=Tk.BOTTOM,expand=Tk.NO,fill=Tk.X) - Tk.Label(self,text="Red=[1,0,0] Green=[0,1,0] Blue=[0,0,1]").pack( - side=Tk.BOTTOM,expand=Tk.NO,fill=Tk.X) - self.ubis=ubis - self.color=numpy.ones((xyz.shape[0],3),float) + self.o.fovy = 5 + self.o.near = 1e6 + self.o.far = 1e-6 + + self.o.distance = 3.0 + # numpy.maximum.reduce(numpy.ravel(xyz))*4 / \ + # math.tan(self.o.fovy*math.pi/180) + print(type(xyz), xyz.dtype.char, xyz.shape) + self.xyz = xyz + f = Tk.Frame(self) + Tk.Button(f, text="Help", command=self.o.help).pack(side=Tk.LEFT) + Tk.Button(f, text="Reset", command=self.o.reset).pack(side=Tk.LEFT) + Tk.Button(f, text="Pointsize", command=self.setps).pack(side=Tk.LEFT) + Tk.Entry(f, textvariable=self.ps).pack(side=Tk.LEFT) + Tk.Button(f, text="Quit", command=self.goaway).pack(side=Tk.RIGHT) + self.dataoff = 0 + self.o.pack(side="top", expand=1, fill="both") + f.pack(side=Tk.BOTTOM, expand=Tk.NO, fill=Tk.X) + Tk.Label(self, text="Red=[1,0,0] Green=[0,1,0] Blue=[0,0,1]").pack( + side=Tk.BOTTOM, expand=Tk.NO, fill=Tk.X + ) + self.ubis = ubis + self.color = numpy.ones((xyz.shape[0], 3), float) print(self.color.shape) - self.tex=False + self.tex = False if ubis is not None: - self.ubis = self.readubis(ubis) - self.scorecolor(0) + self.ubis = self.readubis(ubis) + self.scorecolor(0) if pars is not None: - self.tex=True - self.readspline(spline) - self.readprms(pars) - self.readimage(image) + self.tex = True + self.readspline(spline) + self.readprms(pars) + self.readimage(image) self.after(100, self.changedata) - def readspline(self,spline): + def readspline(self, spline): from ImageD11 import blobcorrector + self.corrector = blobcorrector.correctorclass(spline) - def readubis(self,ubis): + def readubis(self, ubis): from ImageD11 import indexing + return indexing.readubis(ubis) - def readprms(self,prms): + def readprms(self, prms): from ImageD11 import parameters + o = parameters.parameters() o.loadparameters(prms) - self.pars=o.get_parameters() + self.pars = o.get_parameters() - def readimage(self,image): + def readimage(self, image): from ImageD11 import transform from fabio import openimage - self.imageobj=openimage.openimage(image) + + self.imageobj = openimage.openimage(image) # map from 2048x2048 to 1024x1024 d = self.imageobj.data.astype(numpy.float32) - mi= d.mean() - d.std()*2 - mx= d.mean() * d.std()*2 - shape=self.imageobj.data.shape - d=numpy.reshape(numpy.clip(self.imageobj.data,mi,mx),shape) # makes a clipped copy - d=(255.*(d-mi)/(mx-mi)) # scale intensity - print(d.min(),d.max(),d.mean()) - self.image=numpy.zeros((1024,1024),numpy.uint8) - if d.shape==(2048,2048): + mi = d.mean() - d.std() * 2 + mx = d.mean() * d.std() * 2 + shape = self.imageobj.data.shape + d = numpy.reshape( + numpy.clip(self.imageobj.data, mi, mx), shape + ) # makes a clipped copy + d = 255.0 * (d - mi) / (mx - mi) # scale intensity + print(d.min(), d.max(), d.mean()) + self.image = numpy.zeros((1024, 1024), numpy.uint8) + if d.shape == (2048, 2048): # 
rebin 2x2 - im=(d[::2,::2]+d[::2,1::2]+d[1::2,::2]+d[1::2,1::2])/4 - self.image=(255-im).astype(numpy.uint8).tostring() - self.imageWidth=1024 - self.imageHeight=1024 + im = (d[::2, ::2] + d[::2, 1::2] + d[1::2, ::2] + d[1::2, 1::2]) / 4 + self.image = (255 - im).astype(numpy.uint8).tostring() + self.imageWidth = 1024 + self.imageHeight = 1024 # make a 2D array of x,y - p=[] - pk=[] + p = [] + pk = [] step = 64 - r=[ [ 0,0 ], [0,step], [step,step], [step,0] ] - for i in range(0,1024,step): - for j in range(0,1024,step): + r = [[0, 0], [0, step], [step, step], [step, 0]] + for i in range(0, 1024, step): + for j in range(0, 1024, step): # i,j 1024x1024 texture coords # x,y spatially corrected for v in r: - pk.append([i+v[0],j+v[1]]) - x,y = self.corrector.correct((i+v[0])*2 , (j+v[1])*2) # corrected - p.append([x,y]) - p=numpy.array(p).T - pk=numpy.array(pk).T - omega=float(self.imageobj.header['Omega']) - self.pars['distance']=float(self.pars['distance'])*1000 - tth,eta=transform.compute_tth_eta(p,**self.pars) - gve = transform.compute_g_vectors(tth,eta,omega*self.pars['omegasign'],self.pars['wavelength']) + pk.append([i + v[0], j + v[1]]) + x, y = self.corrector.correct( + (i + v[0]) * 2, (j + v[1]) * 2 + ) # corrected + p.append([x, y]) + p = numpy.array(p).T + pk = numpy.array(pk).T + omega = float(self.imageobj.header["Omega"]) + self.pars["distance"] = float(self.pars["distance"]) * 1000 + tth, eta = transform.compute_tth_eta(p, **self.pars) + gve = transform.compute_g_vectors( + tth, eta, omega * self.pars["omegasign"], self.pars["wavelength"] + ) self.pts = [] - print("Setting up image mapping",p.shape,gve.shape) + print("Setting up image mapping", p.shape, gve.shape) for i in range(pk.shape[1]): - self.pts.append([pk[1,i]/1024.,pk[0,i]/1024.,gve[0,i],gve[1,i],gve[2,i]]) - #for p in self.pts: + self.pts.append( + [pk[1, i] / 1024.0, pk[0, i] / 1024.0, gve[0, i], gve[1, i], gve[2, i]] + ) + # for p in self.pts: # print p self.setupTexture() def setupTexture(self): GL.glDisable(GL.GL_TEXTURE_2D) GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1) - GL.glTexImage2D(GL.GL_TEXTURE_2D,#target - 0,#level - 3,#internalformat - self.imageWidth, self.imageHeight, - 0,#border - GL.GL_LUMINANCE,#format - GL.GL_UNSIGNED_BYTE,# type - self.image) + GL.glTexImage2D( + GL.GL_TEXTURE_2D, # target + 0, # level + 3, # internalformat + self.imageWidth, + self.imageHeight, + 0, # border + GL.GL_LUMINANCE, # format + GL.GL_UNSIGNED_BYTE, # type + self.image, + ) GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP) GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T, GL.GL_CLAMP) GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_REPEAT) @@ -192,24 +219,35 @@ def setupTexture(self): GL.glEnable(GL.GL_NORMALIZE) GL.glShadeModel(GL.GL_FLAT) - - - def scorecolor(self,i=0): - cc = [ [ 1,0,0] , [0,1,0] , [0,0,1], [1,1,0], [1,0,1], [0,1,1], - [ 0.5,0,0] , [0,0.5,0] , [0,0,0.5], [0.5,0.5,0], [0.5,0,0.5], - [0,0.5,0.5]] + def scorecolor(self, i=0): + cc = [ + [1, 0, 0], + [0, 1, 0], + [0, 0, 1], + [1, 1, 0], + [1, 0, 1], + [0, 1, 1], + [0.5, 0, 0], + [0, 0.5, 0], + [0, 0, 0.5], + [0.5, 0.5, 0], + [0.5, 0, 0.5], + [0, 0.5, 0.5], + ] if self.ubis is not None: from ImageD11 import indexing - for u,i in zip(self.ubis,list(range(len(self.ubis)))): - scores=indexing.calc_drlv2(u,self.xyz) - print(self.xyz.shape,scores.shape) - ind = numpy.compress( numpy.less(scores,0.05*0.05) , - numpy.arange(self.xyz.shape[0]) ) - print("Grain",i,scores.shape,ind.shape) + + for u, i in zip(self.ubis, 
list(range(len(self.ubis)))): + scores = indexing.calc_drlv2(u, self.xyz) + print(self.xyz.shape, scores.shape) + ind = numpy.compress( + numpy.less(scores, 0.05 * 0.05), numpy.arange(self.xyz.shape[0]) + ) + print("Grain", i, scores.shape, ind.shape) for j in range(3): - c=numpy.ones(self.color.shape[0]) - numpy.put(c,ind,cc[i%len(cc)][j]) - self.color[:,j]*=c + c = numpy.ones(self.color.shape[0]) + numpy.put(c, ind, cc[i % len(cc)][j]) + self.color[:, j] *= c def go(self): """ @@ -221,118 +259,123 @@ def goaway(self): print("Called goaway") self.o.destroy() self.destroy() - if self.parent is None: sys.exit() + if self.parent is None: + sys.exit() print("Ought to be gone now...") - def changedata(self,xyz=None): + def changedata(self, xyz=None): if xyz is not None: - self.xyz=xyz.copy() - self.npeaks=xyz.shape[0] + self.xyz = xyz.copy() + self.npeaks = xyz.shape[0] GL.glDisableClientState(GL.GL_VERTEX_ARRAY) GL.glDisableClientState(GL.GL_COLOR_ARRAY) - GL.glVertexPointer( 3, GL.GL_FLOAT, 0, self.xyz.astype(numpy.float32).tostring() ) - GL.glColorPointer( 3, GL.GL_FLOAT, 0, self.color.astype(numpy.float32).tostring() ) + GL.glVertexPointer(3, GL.GL_FLOAT, 0, self.xyz.astype(numpy.float32).tostring()) + GL.glColorPointer( + 3, GL.GL_FLOAT, 0, self.color.astype(numpy.float32).tostring() + ) GL.glEnableClientState(GL.GL_VERTEX_ARRAY) GL.glEnableClientState(GL.GL_COLOR_ARRAY) self.o.tkRedraw() def setps(self): - self.pointsize=float(self.ps.get()) + self.pointsize = float(self.ps.get()) self.o.tkRedraw() + def redraw(self, o): - - def redraw(self,o): - GL.glDisable(GL.GL_LIGHTING) - GL.glClearColor(0., 0., 0., 0) + GL.glClearColor(0.0, 0.0, 0.0, 0) GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT) - GL.glColor3f(1.0, 1.0, 1.0) # white + GL.glColor3f(1.0, 1.0, 1.0) # white GL.glPointSize(self.pointsize) - GL.glDrawArrays(GL.GL_POINTS, 0, self.npeaks ) + GL.glDrawArrays(GL.GL_POINTS, 0, self.npeaks) - if self.ubis is not None and len(self.ubis)==1: - hkl = numpy.dot(numpy.linalg.inv(self.ubis[0]), - numpy.identity(3,float)).T + if self.ubis is not None and len(self.ubis) == 1: + hkl = numpy.dot(numpy.linalg.inv(self.ubis[0]), numpy.identity(3, float)).T # print hkl else: - hkl = numpy.identity(3,float) + hkl = numpy.identity(3, float) # print hkl - + GL.glBegin(GL.GL_LINE_LOOP) - GL.glColor3f(1.0, 0.0, 0.0) # red - GL.glVertex3f(0.,0.,0.) - GL.glVertex3f(hkl[0][0],hkl[0][1],hkl[0][2]) + GL.glColor3f(1.0, 0.0, 0.0) # red + GL.glVertex3f(0.0, 0.0, 0.0) + GL.glVertex3f(hkl[0][0], hkl[0][1], hkl[0][2]) GL.glEnd() GL.glBegin(GL.GL_LINE_LOOP) - GL.glColor3f(0.0, 1.0, 0.0) # green - GL.glVertex3f(0.,0.,0.) - GL.glVertex3f(hkl[1][0],hkl[1][1],hkl[1][2]) + GL.glColor3f(0.0, 1.0, 0.0) # green + GL.glVertex3f(0.0, 0.0, 0.0) + GL.glVertex3f(hkl[1][0], hkl[1][1], hkl[1][2]) GL.glEnd() GL.glBegin(GL.GL_LINE_LOOP) - GL.glColor3f(0.0, 0.0, 1.0) # blue - GL.glVertex3f(0.,0.,0.) 
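
For reference, indexing.calc_drlv2(u, self.xyz) in scorecolor() above returns, for each g-vector, the squared distance of UBI.g from the nearest integer (h,k,l); peaks with drlv2 below 0.05**2 are treated as indexed by that grain and tinted with its colour. A minimal NumPy sketch of the same quantity, assuming only the standard drlv2 definition (the function name here is illustrative, not the library call):

    import numpy as np

    def drlv2_sketch(ubi, gve):
        # ubi: (3, 3) inverse orientation matrix; gve: (npeaks, 3) g-vectors
        hkl = np.dot(ubi, gve.T)        # continuous Miller indices, (3, npeaks)
        err = hkl - np.round(hkl)       # distance to the nearest reflection
        return (err * err).sum(axis=0)  # squared norm per peak

    # peaks assigned to one grain, matching the 0.05 tolerance used above:
    # mask = drlv2_sketch(ubi, xyz) < 0.05 ** 2
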
-        GL.glVertex3f(hkl[2][0],hkl[2][1],hkl[2][2])
+        GL.glColor3f(0.0, 0.0, 1.0)  # blue
+        GL.glVertex3f(0.0, 0.0, 0.0)
+        GL.glVertex3f(hkl[2][0], hkl[2][1], hkl[2][2])
         GL.glEnd()

         if self.tex:
-#            print "drawing images"
+            # print "drawing images"
             GL.glEnable(GL.GL_TEXTURE_2D)
-            GL.glColor4f(.0, 1.0, .0, 1.0) # red
+            GL.glColor4f(0.0, 1.0, 0.0, 1.0)  # green
             GL.glBegin(GL.GL_QUADS)
             # generate a grid of squares to map the texture in 3D
             # opengl has better "map" methods to do this
-            for i,j,g1,g2,g3 in self.pts:
-#                print i,j,g1,g2,g3
-                GL.glTexCoord2f(i,j)
-                GL.glVertex3f(g1, g2, g3)
+            for i, j, g1, g2, g3 in self.pts:
+                # print i,j,g1,g2,g3
+                GL.glTexCoord2f(i, j)
+                GL.glVertex3f(g1, g2, g3)
             GL.glEnd()
-#            GL.glDisable(GL.GL_TEXTURE_2D)
-
+            # GL.glDisable(GL.GL_TEXTURE_2D)
+
         GL.glFlush()
         GL.glEnable(GL.GL_LIGHTING)

-
-
-
-
-
-if __name__=="__main__":
+if __name__ == "__main__":
     try:
-        lines=open(sys.argv[1],"r").readlines()
+        lines = open(sys.argv[1], "r").readlines()
     except:
-        print("Usage %s gvector_file [ubifile] [image parfile]"%(sys.argv[0]))
+        print("Usage %s gvector_file [ubifile] [image parfile]" % (sys.argv[0]))
         raise
 #        sys.exit()
-
-    on=0
-    xyz=[]
+
+    on = 0
+    xyz = []
     for line in lines:
-        if on==1:
+        if on == 1:
             try:
-                vals=[float(x) for x in line.split()]
-                xyz.append( [ vals[0],vals[1],vals[2] ])
+                vals = [float(x) for x in line.split()]
+                xyz.append([vals[0], vals[1], vals[2]])
             except:
                 pass
-        if line.find("xr yr zr")>0 or line.find("gx ")>0:
+        if line.find("xr yr zr") > 0 or line.find("gx ") > 0:
             on = 1
-    xyz=numpy.array(xyz)
-    if len(xyz) == 0 and lines[0][0]=="#":
+    xyz = numpy.array(xyz)
+    if len(xyz) == 0 and lines[0][0] == "#":
         from ImageD11 import columnfile
-        c = columnfile.columnfile( sys.argv[1] )
-        xyz = numpy.array( (c.gx, c.gy, c.gz )).T
-    npeaks = len(xyz)
-    if len(sys.argv)==3:
-        o=plot3d(None,data=xyz,ubis=sys.argv[2])
-    elif len(sys.argv)==6:
-        o=plot3d(None,data=xyz,ubis=sys.argv[2],image=sys.argv[3],pars=sys.argv[4],spline=sys.argv[5])
+
+        c = columnfile.columnfile(sys.argv[1])
+        xyz = numpy.array((c.gx, c.gy, c.gz)).T
+    npeaks = len(xyz)
+    if len(sys.argv) == 3:
+        o = plot3d(None, data=xyz, ubis=sys.argv[2])
+    elif len(sys.argv) == 6:
+        o = plot3d(
+            None,
+            data=xyz,
+            ubis=sys.argv[2],
+            image=sys.argv[3],
+            pars=sys.argv[4],
+            spline=sys.argv[5],
+        )
     else:
-        o=plot3d(None,data=xyz,ubis=None)
+        o = plot3d(None, data=xyz, ubis=None)
+
     def runit():
         o.changedata(o.xyz)
-        o.after(100, runit )
+
+    o.after(100, runit)
     o.mainloop()
diff --git a/ImageD11/tkGui/twodplot.py b/ImageD11/tkGui/twodplot.py
index 36d16edb..05b519b8 100644
--- a/ImageD11/tkGui/twodplot.py
+++ b/ImageD11/tkGui/twodplot.py
@@ -1,4 +1,3 @@
-
 from __future__ import print_function

 # ImageD11_v0.4 Software for beamline ID11
@@ -27,7 +26,8 @@
 From the matplotlib examples - modified for mouse
 """
 import matplotlib
-matplotlib.use('TkAgg')
+
+matplotlib.use("TkAgg")

 from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
 from matplotlib.figure import Figure
@@ -36,106 +36,136 @@
     import Tkinter as Tk
     import tkFileDialog as filedialog
     import tkMessageBox as messagebox
-
+
 except:
     import tkinter as Tk
     import tkinter.filedialog as filedialog
     import tkinter.messagebox as messagebox

-import sys,os,time
+import os, time
+

 class data:
-    def __init__(self,x,y,d={}):
-        self.x=x
-        self.y=y
-        self.d=d
+    def __init__(self, x, y, d={}):
+        self.x = x
+        self.y = y
+        self.d = d
+

 class twodplot(Tk.Frame):
-    def __init__(self,parent=None,data=None,quiet="No"):
-        Tk.Frame.__init__(self,parent)
-        self.quiet=quiet
-        
self.f = Figure(figsize=(8,5), dpi=100) + def __init__(self, parent=None, data=None, quiet="No"): + Tk.Frame.__init__(self, parent) + self.quiet = quiet + self.f = Figure(figsize=(8, 5), dpi=100) self.a = self.f.add_subplot(111) - self.plotitems={} - self.maxpoints=1000000 # work around slow plotting + self.plotitems = {} + self.maxpoints = 1000000 # work around slow plotting # print data if data is not None: - self.plotitems[data[0]]=data[1] - self.title=None - self.xr=self.yr=None + self.plotitems[data[0]] = data[1] + self.title = None + self.xr = self.yr = None # a tk.DrawingArea self.canvas = FigureCanvasTkAgg(self.f, master=self) self.canvas.draw() - self.tkc=self.canvas.get_tk_widget() + self.tkc = self.canvas.get_tk_widget() self.tkc.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1) - self.tkc.bind("",self.on_down) - self.tkc.bind("",self.on_up) - self.tkc.bind("",self.on_move) - self.tkc.bind("",self.on_2) - self.tkc.bind("",self.on_3) + self.tkc.bind("", self.on_down) + self.tkc.bind("", self.on_up) + self.tkc.bind("", self.on_move) + self.tkc.bind("", self.on_2) + self.tkc.bind("", self.on_3) self.when_down = -1 self.time_down = 0.1 self.bindkeys() - self.rubberbandbox=None - self.pack_opts={'side':Tk.LEFT, 'padx':'2', 'pady':'2'} - self.bf1=Tk.Frame(self) - Tk.Button(master=self.bf1, text='Clear', command=self.clear).pack(self.pack_opts) - Tk.Button(master=self.bf1, text='Save Plot', command=self.printplot).pack(self.pack_opts) - Tk.Button(master=self.bf1, text='LogY', command=self.logy).pack(self.pack_opts) - Tk.Button(master=self.bf1, text='LogX', command=self.logx).pack(self.pack_opts) -# FIXME - buttons for panx/y and zoomx/y - Tk.Button(master=self.bf1,text='>' , - command=lambda : self.keypress(self.a.panx,1 ) ).pack(self.pack_opts) - Tk.Button(master=self.bf1,text='<', - command=lambda : self.keypress(self.a.panx,-1) ).pack(self.pack_opts) - Tk.Button(master=self.bf1,text='^' , - command=lambda : self.keypress(self.a.pany,-1 ) ).pack(self.pack_opts) - Tk.Button(master=self.bf1,text='v' , - command=lambda : self.keypress(self.a.pany,1) ).pack(self.pack_opts) + self.rubberbandbox = None + self.pack_opts = {"side": Tk.LEFT, "padx": "2", "pady": "2"} + self.bf1 = Tk.Frame(self) + Tk.Button(master=self.bf1, text="Clear", command=self.clear).pack( + self.pack_opts + ) + Tk.Button(master=self.bf1, text="Save Plot", command=self.printplot).pack( + self.pack_opts + ) + Tk.Button(master=self.bf1, text="LogY", command=self.logy).pack(self.pack_opts) + Tk.Button(master=self.bf1, text="LogX", command=self.logx).pack(self.pack_opts) + # FIXME - buttons for panx/y and zoomx/y + Tk.Button( + master=self.bf1, text=">", command=lambda: self.keypress(self.a.panx, 1) + ).pack(self.pack_opts) + Tk.Button( + master=self.bf1, text="<", command=lambda: self.keypress(self.a.panx, -1) + ).pack(self.pack_opts) + Tk.Button( + master=self.bf1, text="^", command=lambda: self.keypress(self.a.pany, -1) + ).pack(self.pack_opts) + Tk.Button( + master=self.bf1, text="v", command=lambda: self.keypress(self.a.pany, 1) + ).pack(self.pack_opts) self.bf1.pack(side=Tk.BOTTOM) - self.bf2=Tk.Frame(self) - Tk.Button(master=self.bf2,text='UnZoomX' , - command=lambda : self.keypress(self.a.zoomx,-1 ) ).pack(self.pack_opts) - Tk.Button(master=self.bf2,text='ZoomX', - command=lambda : self.keypress(self.a.zoomx,1) ).pack(self.pack_opts) - Tk.Button(master=self.bf2,text='ZoomY' , - command=lambda : self.keypress(self.a.zoomy,1 ) ).pack(self.pack_opts) - Tk.Button(master=self.bf2,text='UnZoomY' , - command=lambda : 
self.keypress(self.a.zoomy,-1) ).pack(self.pack_opts) - Tk.Button(master=self.bf2,text='Autoscale' , - command=lambda : self.keypress(self.autoscale) ).pack(self.pack_opts) - Tk.Button(master=self.bf2,text='Autoscale Y', - command=lambda : self.keypress(self.autoscaley, None ) ).pack(self.pack_opts) + self.bf2 = Tk.Frame(self) + Tk.Button( + master=self.bf2, + text="UnZoomX", + command=lambda: self.keypress(self.a.zoomx, -1), + ).pack(self.pack_opts) + Tk.Button( + master=self.bf2, + text="ZoomX", + command=lambda: self.keypress(self.a.zoomx, 1), + ).pack(self.pack_opts) + Tk.Button( + master=self.bf2, + text="ZoomY", + command=lambda: self.keypress(self.a.zoomy, 1), + ).pack(self.pack_opts) + Tk.Button( + master=self.bf2, + text="UnZoomY", + command=lambda: self.keypress(self.a.zoomy, -1), + ).pack(self.pack_opts) + Tk.Button( + master=self.bf2, + text="Autoscale", + command=lambda: self.keypress(self.autoscale), + ).pack(self.pack_opts) + Tk.Button( + master=self.bf2, + text="Autoscale Y", + command=lambda: self.keypress(self.autoscaley, None), + ).pack(self.pack_opts) self.bf2.pack(side=Tk.BOTTOM) - self.label=Tk.Label(self,text="Plot window messages") - self.label.pack(side=Tk.BOTTOM,fill=Tk.X, expand=0) - self.pack(side=Tk.TOP,fill=Tk.BOTH,expand=Tk.YES) - self.hidden=[] + self.label = Tk.Label(self, text="Plot window messages") + self.label.pack(side=Tk.BOTTOM, fill=Tk.X, expand=0) + self.pack(side=Tk.TOP, fill=Tk.BOTH, expand=Tk.YES) + self.hidden = [] self.replot() - self.xd=None - self.yd=None + self.xd = None + self.yd = None def printplot(self): - fn = filedialog.asksaveasfilename(title="File name to print to", - defaultextension="png") + fn = filedialog.asksaveasfilename( + title="File name to print to", defaultextension="png" + ) print(fn) - f,e = os.path.splitext(fn) - extns=['png','ps','eps','bmp','raw','rgb'] + f, e = os.path.splitext(fn) + extns = ["png", "ps", "eps", "bmp", "raw", "rgb"] print(e) - if e.lower() in ['.png','.ps','.eps','.bmp','.raw','.rgb']: - self.update_idletasks() # Try to get screen redrawn - self.canvas.print_figure(fn, dpi=300, orientation='landscape') + if e.lower() in [".png", ".ps", ".eps", ".bmp", ".raw", ".rgb"]: + self.update_idletasks() # Try to get screen redrawn + self.canvas.print_figure(fn, dpi=300, orientation="landscape") else: - messagebox.showinfo("Sorry","I can only make output in these formats"+str(extns)) + messagebox.showinfo( + "Sorry", "I can only make output in these formats" + str(extns) + ) - def keypress(self,*arg): - if len(arg)>1: + def keypress(self, *arg): + if len(arg) > 1: arg[0](*arg[1:]) else: arg[0]() self.canvas.draw() - def bindkeys(self): return # self.bind_all('' ,lambda e: self.keypress(self.a.panx,1 ) ) @@ -149,17 +179,17 @@ def bindkeys(self): # self.bind_all('' , lambda e : self.keypress(self.autoscale) ) # self.bind_all('', lambda e : self.keypress(self.autoscaley, e ) ) - def autoscaley(self,e): + def autoscaley(self, e): print(dir(self.a.dataLim)) print(self.a.dataLim) - yr=self.a.get_ylim() + yr = self.a.get_ylim() self.a.set_ylim(yr) - def adddata(self,data): + def adddata(self, data): """ Takes a tuple of name, data object """ - self.plotitems[data[0]]=data[1] + self.plotitems[data[0]] = data[1] if data[0] in self.hidden: self.hidden.remove(data[0]) self.replot() @@ -169,44 +199,41 @@ def hideall(self): for item in list(self.plotitems.keys()): self.hidden.append(item) - - def removedata(self,name): + def removedata(self, name): try: self.plotitems.pop(name) except KeyError: pass - def replot(self): 
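
For reference, replot() below walks self.plotitems, the dictionary filled by adddata(); each value is a data object whose d dict may carry "xlabel", "ylabel", "title", a "plotopts" dict of matplotlib keyword arguments, or "plottype" to request a histogram. A hedged usage sketch (p stands for an existing twodplot instance; the series name is arbitrary):

    import numpy as np

    xvals = np.linspace(0.0, 3.0, 500)
    series = data(
        xvals,
        np.sin(2 * np.pi * xvals) + 5,
        {
            "xlabel": "x",
            "ylabel": "intensity",
            "plotopts": {"marker": ".", "linestyle": "none", "alpha": 0.25},
        },
    )
    p.adddata(("demo", series))  # adddata() triggers replot() for you
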
self.a.clear()
         if self.title is not None:
             self.a.set_title(self.title)
-# b : blue
-# g : green
-# r : red
-# c : cyan
-# m : magenta
-# y : yello
-# c = ['g','r','c','m','y','b']
+        # b : blue
+        # g : green
+        # r : red
+        # c : cyan
+        # m : magenta
+        # y : yellow
+        # c = ['g','r','c','m','y','b']
         for name in list(self.plotitems.keys()):
             if name in self.hidden:
                 continue
-            item=self.plotitems[name]
-            #print 'x ', item.x
-            #print 'y ', item.y
-            #print 'd ', item.d
-            #print self.plotitems[name].d
-
-#            if item.d.has_key('color'):
-#                pc=item.d['color']
-#            else:
-#                c.append(c[0])
-#                pc=c.pop(0)
-            if 'plotopts' in item.d:
-                po = item.d['plotopts']
+            item = self.plotitems[name]
+            # print 'x ', item.x
+            # print 'y ', item.y
+            # print 'd ', item.d
+            # print self.plotitems[name].d
+
+            # if item.d.has_key('color'):
+            #     pc=item.d['color']
+            # else:
+            #     c.append(c[0])
+            #     pc=c.pop(0)
+            if "plotopts" in item.d:
+                po = item.d["plotopts"]
             else:
-                po = {'marker' : '.', 'linestyle' : 'none',
-                      'alpha' : 0.25}
+                po = {"marker": ".", "linestyle": "none", "alpha": 0.25}
             if "xlabel" in item.d:
                 self.a.set_xlabel(item.d["xlabel"])
             if "ylabel" in item.d:
@@ -214,18 +241,26 @@ def replot(self):
             if "title" in item.d:
                 self.a.set_title(item.d["title"])
             try:
-                if "plottype" in item.d:
-                    self.a.hist(item.y,item.x)
-                elif item.x.shape[0]>self.maxpoints:
-                    if self.quiet=="No":
-                        if messagebox.askyesno("Slow plotting workaround","Shall I plot only the first %d points for increased speed?"%(self.maxpoints)):
-                            self.a.plot(item.x[:self.maxpoints],item.y[:self.maxpoints],**po)
+                if "plottype" in item.d:
+                    self.a.hist(item.y, item.x)
+                elif item.x.shape[0] > self.maxpoints:
+                    if self.quiet == "No":
+                        if messagebox.askyesno(
+                            "Slow plotting workaround",
+                            "Shall I plot only the first %d points for increased speed?"
+                            % (self.maxpoints),
+                        ):
+                            self.a.plot(
+                                item.x[: self.maxpoints], item.y[: self.maxpoints], **po
+                            )
                         else:
-                            self.a.plot(item.x,item.y,**po)
+                            self.a.plot(item.x, item.y, **po)
                     else:
-                        self.a.plot(item.x[:self.maxpoints],item.y[:self.maxpoints],**po)
+                        self.a.plot(
+                            item.x[: self.maxpoints], item.y[: self.maxpoints], **po
+                        )
                 else:
-                    self.a.plot(item.x,item.y,**po)
+                    self.a.plot(item.x, item.y, **po)
             except:
                 print("plotting exception ignored")
                 raise
@@ -236,22 +271,22 @@ def replot(self):
         self.canvas.draw()

     def logy(self):
-# FIXME - clip negative values before making logscaled?
+        # FIXME - clip negative values before making logscaled?
         if self.a.yaxis.is_log():
-            self.a.set_yscale('linear')
+            self.a.set_yscale("linear")
         else:
-            self.a.set_yscale('log')
+            self.a.set_yscale("log")
         self.canvas.draw()

     def logx(self):
-# FIXME - clip negative values before making logscaled?
+        # FIXME - clip negative values before making logscaled?
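
One way to discharge the FIXME above, sketched on the assumption that the plotted arrays are numpy arrays: clip everything to the smallest positive value before switching scales, so matplotlib's log axis never sees zeros or negatives (the helper name is illustrative):

    import numpy as np

    def clip_for_log(values):
        values = np.asarray(values, dtype=float)
        positive = values[values > 0]
        if positive.size == 0:
            raise ValueError("nothing positive to show on a log scale")
        # floor everything at the smallest positive entry
        return np.clip(values, positive.min(), None)
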
if self.a.xaxis.is_log():
-            self.a.set_xscale('linear')
+            self.a.set_xscale("linear")
         else:
-            self.a.set_xscale('log')
+            self.a.set_xscale("log")
         self.canvas.draw()

-    def on_3(self,event):
+    def on_3(self, event):
         self.autoscale()

     def autoscale(self):
@@ -260,32 +295,32 @@ def autoscale(self):
         self.replot()

     def clear(self):
-        self.plotitems={}
+        self.plotitems = {}
         self.replot()

-    def on_2(self,event):
+    def on_2(self, event):
         try:
             height = self.f.bbox.height()
-            x, y = event.x, height-event.y
-            (xd,yd)= self.a.transData.inverse_xy_tup( (x,y) )
+            x, y = event.x, height - event.y
+            (xd, yd) = self.a.transData.inverse_xy_tup((x, y))
         except:
             height = self.f.bbox.height
-            x, y = event.x, height-event.y
-            (xd,yd)= self.a.transData.inverted().transform((x,y))
-        self.label.config(text="Clicked at x=%f y=%f"%(xd,yd))
+            x, y = event.x, height - event.y
+            (xd, yd) = self.a.transData.inverted().transform((x, y))
+        self.label.config(text="Clicked at x=%f y=%f" % (xd, yd))

     # Callback functions for mouse
-    def on_down(self,event):
+    def on_down(self, event):
         # get the x and y coords, flip y from top to bottom
         self.when_down = time.time()
         try:
             height = self.f.bbox.height()
-            x, y = event.x, height-event.y
-            (self.xd,self.yd)= self.a.transData.inverse_xy_tup( (x,y) )
+            x, y = event.x, height - event.y
+            (self.xd, self.yd) = self.a.transData.inverse_xy_tup((x, y))
         except:
             height = self.f.bbox.height
-            x, y = event.x, height-event.y
-            (self.xd,self.yd)= self.a.transData.inverted().transform((x,y))
+            x, y = event.x, height - event.y
+            (self.xd, self.yd) = self.a.transData.inverted().transform((x, y))

         # transData transforms data coords to display coords. Use the
         # inverse method to transform back
@@ -293,61 +328,65 @@ def on_down(self, event):
         # rubber banding:
         if self.rubberbandbox is not None:
             self.tkc.delete(self.rubberbandbox)
-        self.startx=self.tkc.canvasx(event.x)
-        self.starty=self.tkc.canvasx(event.y)
+        self.startx = self.tkc.canvasx(event.x)
+        self.starty = self.tkc.canvasy(event.y)

-    def on_move(self,event):
+    def on_move(self, event):
         x = self.tkc.canvasx(event.x)
         y = self.tkc.canvasy(event.y)
-        if (self.startx != event.x) and (self.starty != event.y) :
+        if (self.startx != event.x) and (self.starty != event.y):
             if self.rubberbandbox is not None:
                 self.tkc.delete(self.rubberbandbox)
-            self.rubberbandbox = self.tkc.create_rectangle(self.startx, self.starty, x, y, outline='green')
+            self.rubberbandbox = self.tkc.create_rectangle(
+                self.startx, self.starty, x, y, outline="green"
+            )
             # this flushes the output, making sure that
             # the rectangle makes it to the screen
             # before the next event is handled

-    def on_up(self,event):
+    def on_up(self, event):
         # get the x and y coords, flip y from top to bottom
-        if self.xd==None:
+        if self.xd is None:
             return
-        if time.time()-self.when_down < self.time_down and self.when_down>0:
+        if time.time() - self.when_down < self.time_down and self.when_down > 0:
             return
         self.tkc.delete(self.rubberbandbox)
         try:
             height = self.f.bbox.height()
-            x, y = event.x, height-event.y
-            (self.xu,self.yu) = self.a.transData.inverse_xy_tup( (x,y) )
+            x, y = event.x, height - event.y
+            (self.xu, self.yu) = self.a.transData.inverse_xy_tup((x, y))
         except:
             height = self.f.bbox.height
-            x, y = event.x, height-event.y
-            (self.xu,self.yu)= self.a.transData.inverted().transform((x,y))
+            x, y = event.x, height - event.y
+            (self.xu, self.yu) = self.a.transData.inverted().transform((x, y))
         # transData transforms data coords to display coords. 
Use the # inverse method to transform back if self.xu != self.xd and self.yu != self.yd: # rescale - xr=[self.xd,self.xu];xr.sort() - yr=[self.yd,self.yu];yr.sort() - self.xr=xr - self.yr=yr + xr = [self.xd, self.xu] + xr.sort() + yr = [self.yd, self.yu] + yr.sort() + self.xr = xr + self.yr = yr self.a.set_xlim(xr) self.a.set_ylim(yr) self.canvas.draw() -if __name__=="__main__": +if __name__ == "__main__": import numpy as np from math import pi - x=np.arange(0.0,3.0,0.01) - dat=data(x, - np.sin(2*pi*x)+5, - np.sqrt(np.sin(2*pi*x)+5), - { "title":"sin x" }) + + x = np.arange(0.0, 3.0, 0.01) + dat = data( + x, np.sin(2 * pi * x) + 5, np.sqrt(np.sin(2 * pi * x) + 5), {"title": "sin x"} + ) root = Tk.Tk() root.wm_title("Two dimensional plotting") - p=twodplot(root,data=dat) + p = twodplot(root, data=dat) Tk.mainloop() diff --git a/ImageD11/transform.py b/ImageD11/transform.py index 7bacda17..d40fabcb 100644 --- a/ImageD11/transform.py +++ b/ImageD11/transform.py @@ -1,5 +1,8 @@ - from __future__ import print_function +import logging +import numpy as np +from ImageD11 import gv_general, cImageD11 +import fabio # for LUT # ImageD11_v0.4 Software for beamline ID11 # Copyright (C) 2005 Jon Wright @@ -21,30 +24,18 @@ """ Functions for transforming peaks """ -import logging -import numpy as np -from ImageD11 import gv_general, cImageD11 -from numpy import radians, degrees -import fabio # for LUT - -try: - # crazy debug - _ = np.arccos(np.zeros(10, float)) -except: - print(dir()) - raise - -from math import pi def cross_product_2x2(a, b): - """ returns axb for two len(3) vectors a,b""" + """returns axb for two len(3) vectors a,b""" assert len(a) == len(b) == 3 - return np.array([a[1] * b[2] - a[2] * b[1], - a[2] * b[0] - a[0] * b[2], - a[0] * b[1] - a[1] * b[0]]) - - + return np.array( + [ + a[1] * b[2] - a[2] * b[1], + a[2] * b[0] - a[0] * b[2], + a[0] * b[1] - a[1] * b[0], + ] + ) def detector_rotation_matrix(tilt_x, tilt_y, tilt_z): @@ -53,27 +44,51 @@ def detector_rotation_matrix(tilt_x, tilt_y, tilt_z): tilts are in radians typically applied to peaks rotating around beam center """ - r1 = np.array([[np.cos(tilt_z), -np.sin(tilt_z), 0], # note this is r.h. - [np.sin(tilt_z), np.cos(tilt_z), 0], - [0, 0, 1]], float) - r2 = np.array([[np.cos(tilt_y), 0, np.sin(tilt_y)], - [0, 1, 0], - [-np.sin(tilt_y), 0, np.cos(tilt_y)]], float) - r3 = np.array([[1, 0, 0], - [0, np.cos(tilt_x), -np.sin(tilt_x)], - [0, np.sin(tilt_x), np.cos(tilt_x)]], float) + r1 = np.array( + [ + [np.cos(tilt_z), -np.sin(tilt_z), 0], # note this is r.h. 
+ [np.sin(tilt_z), np.cos(tilt_z), 0], + [0, 0, 1], + ], + float, + ) + r2 = np.array( + [ + [np.cos(tilt_y), 0, np.sin(tilt_y)], + [0, 1, 0], + [-np.sin(tilt_y), 0, np.cos(tilt_y)], + ], + float, + ) + r3 = np.array( + [ + [1, 0, 0], + [0, np.cos(tilt_x), -np.sin(tilt_x)], + [0, np.sin(tilt_x), np.cos(tilt_x)], + ], + float, + ) r2r1 = np.dot(np.dot(r3, r2), r1) return r2r1 -def compute_xyz_lab(peaks, - y_center=0., y_size=0., tilt_y=0., - z_center=0., z_size=0., tilt_z=0., - tilt_x=0., - distance=0., - # detector_orientation=((1,0),(0,1)), - o11=1.0, o12=0.0, o21=0.0, o22=-1.0, - **kwds): +def compute_xyz_lab( + peaks, + y_center=0.0, + y_size=0.0, + tilt_y=0.0, + z_center=0.0, + z_size=0.0, + tilt_z=0.0, + tilt_x=0.0, + distance=0.0, + # detector_orientation=((1,0),(0,1)), + o11=1.0, + o12=0.0, + o21=0.0, + o22=-1.0, + **kwds +): """ Peaks is a 2 d array of x,y yc is the centre in y @@ -89,7 +104,7 @@ def compute_xyz_lab(peaks, ((-1, 0),( 0, 1)) for (-x, y) (( 0,-1),(-1, 0)) for (-y,-x) etc... - + kwds are not used (but lets you pass in a dict with other things in it) """ assert len(peaks) == 2, "peaks must be a 2D array" @@ -103,13 +118,17 @@ def compute_xyz_lab(peaks, # detector_orientation = [[o11, o12], [o21, o22]] # logging.debug("detector_orientation = "+str(detector_orientation)) - flipped = np.dot(np.array(detector_orientation, float), - peaks_on_detector) + flipped = np.dot(np.array(detector_orientation, float), peaks_on_detector) # - vec = np.array([np.zeros(flipped.shape[1]), # place detector at zero, - # sample at -dist - flipped[1, :], # x in search, frelon +z - flipped[0, :]], float) # y in search, frelon -y + vec = np.array( + [ + np.zeros(flipped.shape[1]), # place detector at zero, + # sample at -dist + flipped[1, :], # x in search, frelon +z + flipped[0, :], + ], + float, + ) # y in search, frelon -y # Position of diffraction spots in 3d space after detector tilts about # the beam centre on the detector rotvec = np.dot(r2r1, vec) @@ -118,49 +137,70 @@ def compute_xyz_lab(peaks, return rotvec -def compute_tth_eta(peaks, - y_center=0., y_size=0., tilt_y=0., - z_center=0., z_size=0., tilt_z=0., - tilt_x=0., - distance=0., - # detector_orientation=((1,0),(0,1)), - o11=1.0, o12=0.0, o21=0.0, o22=-1.0, - t_x=0.0, t_y=0.0, t_z=0.0, - omega=None, # == phi at chi=90 - wedge=0.0, # Wedge == theta on 4circ - chi=0.0, # == chi - 90 - **kwds): # spare args are ignored +def compute_tth_eta( + peaks, + y_center=0.0, + y_size=0.0, + tilt_y=0.0, + z_center=0.0, + z_size=0.0, + tilt_z=0.0, + tilt_x=0.0, + distance=0.0, + # detector_orientation=((1,0),(0,1)), + o11=1.0, + o12=0.0, + o21=0.0, + o22=-1.0, + t_x=0.0, + t_y=0.0, + t_z=0.0, + omega=None, # == phi at chi=90 + wedge=0.0, # Wedge == theta on 4circ + chi=0.0, # == chi - 90 + **kwds +): # spare args are ignored """ Finds x,y,z co-ordinates of peaks in the laboratory frame Computes tth/eta from these (in degrees) - + kwds are not used (left for convenience if you have a parameter dict) """ peaks_xyz = compute_xyz_lab( peaks, - y_center=y_center, y_size=y_size, tilt_y=tilt_y, - z_center=z_center, z_size=z_size, tilt_z=tilt_z, + y_center=y_center, + y_size=y_size, + tilt_y=tilt_y, + z_center=z_center, + z_size=z_size, + tilt_z=tilt_z, tilt_x=tilt_x, distance=distance, # detector_orientation=((1,0),(0,1)), - o11=o11, o12=o12, o21=o21, o22=o22) + o11=o11, + o12=o12, + o21=o21, + o22=o22, + ) tth, eta = compute_tth_eta_from_xyz( - peaks_xyz, - t_x=t_x, t_y=t_y, t_z=t_z, - omega=omega, - wedge=wedge, - chi=chi) + peaks_xyz, t_x=t_x, 
t_y=t_y, t_z=t_z, omega=omega, wedge=wedge, chi=chi + ) return tth, eta -def compute_tth_eta_from_xyz(peaks_xyz, omega, - t_x=0.0, t_y=0.0, t_z=0.0, - # == phi at chi=90 - wedge=0.0, # Wedge == theta on 4circ - chi=0.0, # == chi - 90 - **kwds): # last line is for laziness - +def compute_tth_eta_from_xyz( + peaks_xyz, + omega, + t_x=0.0, + t_y=0.0, + t_z=0.0, + # == phi at chi=90 + wedge=0.0, # Wedge == theta on 4circ + chi=0.0, # == chi - 90 + **kwds +): # last line is for laziness - """ Peaks is a 3 d array of x,y,z peak co-ordinates crystal_translation is the position of the grain giving rise to a diffraction spot @@ -168,22 +208,20 @@ def compute_tth_eta_from_xyz(peaks_xyz, omega, x,y is with respect to the axis of rotation (usually also beam centre). z with respect to beam height, z centre omega data are needed if crystal translations are used - + computed via the arctan recipe. - + returns tth/eta in degrees """ assert len(peaks_xyz) == 3 # Scattering vectors - if omega is None or (t_x == 0. and t_y == 0 and t_z == 0): + if omega is None or (t_x == 0.0 and t_y == 0 and t_z == 0): s1 = peaks_xyz else: # scattering_vectors if len(omega) != len(peaks_xyz[0]): - raise Exception( - "omega and peaks arrays must have same number of peaks") - s1 = peaks_xyz - compute_grain_origins(omega, wedge, chi, - t_x, t_y, t_z) + raise Exception("omega and peaks arrays must have same number of peaks") + s1 = peaks_xyz - compute_grain_origins(omega, wedge, chi, t_x, t_y, t_z) # CHANGED to HFP convention 4-9-2007 eta = np.degrees(np.arctan2(-s1[1, :], s1[2, :])) s1_perp_x = np.sqrt(s1[1, :] * s1[1, :] + s1[2, :] * s1[2, :]) @@ -192,51 +230,57 @@ def compute_tth_eta_from_xyz(peaks_xyz, omega, def compute_sinsqth_from_xyz(xyz): - """ Computes sin(theta)**2 + """Computes sin(theta)**2 x,y,z = co-ordinates of the pixel in cartesian space - - if you need grain translations then use func(peaks_xyz - compute_grain_origins(...) ) + + if you need grain translations then use: + func(peaks_xyz - compute_grain_origins(...) ) seems to be competitive with arctan2 (docs/sintheta_squared_geometry.ipynb) - - returns sin(theta)**2 + + returns sin(theta)**2 """ # R = hypotenuse of component normal to incident beam (defines x). e.g. y*y+z*z - R = xyz[1]*xyz[1] + xyz[2]*xyz[2] + R = xyz[1] * xyz[1] + xyz[2] * xyz[2] # Q = hypotenuse along the scattered beam, e.g. x*x+y*y+z*z - Q = xyz[0]*xyz[0] + R + Q = xyz[0] * xyz[0] + R # if Q == 0 then this is undefined - sinsqth = 0.5*R/( Q + xyz[0]*np.sqrt(Q) ) + sinsqth = 0.5 * R / (Q + xyz[0] * np.sqrt(Q)) return sinsqth def sinth2_sqrt_deriv(xyz): - """ sin(theta)**2 from xyz, and derivatives w.r.t x,y,z - """ - x,y,z = xyz - R = z*z + y*y - Q = R + x*x + """sin(theta)**2 from xyz, and derivatives w.r.t x,y,z""" + x, y, z = xyz + R = z * z + y * y + Q = R + x * x SQ = np.sqrt(Q) - R2 = R/2 + R2 = R / 2 # at x==y==0 this is undefined. 
ac - rQ_xSQ = 1/(Q + x*SQ) - sinth2 = R2*rQ_xSQ + rQ_xSQ = 1 / (Q + x * SQ) + sinth2 = R2 * rQ_xSQ # some simplification and collecting terms from expressions above to get: - sr = sinth2*rQ_xSQ - p = (x / SQ + 2)*sr # p should be in the range 3sr -> 2sr for x/x to 0/sqrt(R) - t = (rQ_xSQ - p) # - sinth2_dx = -(SQ*sr+x*p) - sinth2_dy = y*t - sinth2_dz = z*t + sr = sinth2 * rQ_xSQ + p = (x / SQ + 2) * sr # p should be in the range 3sr -> 2sr for x/x to 0/sqrt(R) + t = rQ_xSQ - p # + sinth2_dx = -(SQ * sr + x * p) + sinth2_dy = y * t + sinth2_dz = z * t return sinth2, sinth2_dx, sinth2_dy, sinth2_dz -def compute_xyz_from_tth_eta(tth, eta, omega, - t_x=0.0, t_y=0.0, t_z=0.0, - # == phi at chi=90 - wedge=0.0, # Wedge == theta on 4circ - chi=0.0, # == chi - 90 - **kwds): # last line is for laziness - +def compute_xyz_from_tth_eta( + tth, + eta, + omega, + t_x=0.0, + t_y=0.0, + t_z=0.0, + # == phi at chi=90 + wedge=0.0, # Wedge == theta on 4circ + chi=0.0, # == chi - 90 + **kwds +): # last line is for laziness - """ Given the tth, eta and omega, compute the xyz on the detector @@ -252,61 +296,58 @@ def compute_xyz_from_tth_eta(tth, eta, omega, xyz = np.zeros((3, tth.shape[0]), float) rtth = np.radians(tth) reta = np.radians(eta) - xyz[0, :] = np.cos(rtth) + xyz[0, :] = np.cos(rtth) # eta = np.degrees(np.arctan2(-s1[1, :], s1[2, :])) xyz[1, :] = -np.sin(rtth) * np.sin(reta) - xyz[2, :] = np.sin(rtth) * np.cos(reta) + xyz[2, :] = np.sin(rtth) * np.cos(reta) # Find vectors in the fast, slow directions in the detector plane - pks = np.array([(1, 0), - (0, 1), - (0, 0) ], float).T + pks = np.array([(1, 0), (0, 1), (0, 0)], float).T dxyzl = compute_xyz_lab(pks, **kwds) # == [xpos, ypos, zpos] shape (3,n) # # This was based on the recipe from Thomas in Acta Cryst ... # ... Modern Equations of ... 
- ds = dxyzl[:,0] - dxyzl[:,2] # 1,0 in plane is (1,0)-(0,0) - df = dxyzl[:,1] - dxyzl[:,2] # 0,1 in plane - dO = dxyzl[:,2] # origin pixel + ds = dxyzl[:, 0] - dxyzl[:, 2] # 1,0 in plane is (1,0)-(0,0) + df = dxyzl[:, 1] - dxyzl[:, 2] # 0,1 in plane + dO = dxyzl[:, 2] # origin pixel # Cross products to get the detector normal # Thomas uses an inverse matrix, but then divides out the determinant anyway - det_norm = np.cross( ds, df ) + det_norm = np.cross(ds, df) # Scattered rays on detector normal - norm = np.dot( det_norm, xyz ) + norm = np.dot(det_norm, xyz) # Check for divide by zero - msk = (norm == 0) + msk = norm == 0 needmask = False - if msk.sum()>0: + if msk.sum() > 0: norm += msk needmask = True # Intersect ray on detector plane - sc = np.dot( np.cross( df, dO ), xyz ) / norm - fc = np.dot( np.cross( dO, ds ), xyz ) / norm + sc = np.dot(np.cross(df, dO), xyz) / norm + fc = np.dot(np.cross(dO, ds), xyz) / norm if (t_x != 0) or (t_y != 0) or (t_z != 0): - go = compute_grain_origins(omega, - wedge=wedge, chi=chi, - t_x=t_x, t_y=t_y, t_z=t_z) + go = compute_grain_origins( + omega, wedge=wedge, chi=chi, t_x=t_x, t_y=t_y, t_z=t_z + ) # project these onto the detector face to give shifts - sct = ( xyz * np.cross( df, go.T ).T ).sum(axis=0) / norm - fct = ( xyz * np.cross( go.T, ds ).T ).sum(axis=0) / norm + sct = (xyz * np.cross(df, go.T).T).sum(axis=0) / norm + fct = (xyz * np.cross(go.T, ds).T).sum(axis=0) / norm sc -= sct fc -= fct if needmask: - fc = np.where( msk, 0, fc ) - sc = np.where( msk, 0, sc ) + fc = np.where(msk, 0, fc) + sc = np.where(msk, 0, sc) return fc, sc -def compute_grain_origins(omega, wedge=0.0, chi=0.0, - t_x=0.0, t_y=0.0, t_z=0.0): +def compute_grain_origins(omega, wedge=0.0, chi=0.0, t_x=0.0, t_y=0.0, t_z=0.0): """ # print "Using translations t_x %f t_y %f t_z %f"%(t_x,t_y,t_z) # Compute positions of grains @@ -330,14 +371,6 @@ def compute_grain_origins(omega, wedge=0.0, chi=0.0, # ( 0 , cos(chi) , sin(chi) ) ??? Use eta0 instead # ( 0 , -sin(chi) , cos(chi) ) ??? Use eta0 instead """ - w = np.radians(wedge) - WI = np.array([[np.cos(w), 0, -np.sin(w)], - [0, 1, 0], - [np.sin(w), 0, np.cos(w)]], float) - c = np.radians(chi) - CI = np.array([[1, 0, 0], - [0, np.cos(c), -np.sin(c)], - [0, np.sin(c), np.cos(c)]], float) t = np.zeros((3, omega.shape[0]), float) # crystal translations # Rotations in reverse order compared to making g-vector # also reverse directions. this is trans at all zero to @@ -366,35 +399,40 @@ def compute_grain_origins(omega, wedge=0.0, chi=0.0, return t -def compute_tth_histo(tth, no_bins=100, weight = False, weights = None, - **kwds): +def compute_tth_histo(tth, no_bins=100, weight=False, weights=None, **kwds): """ Compute a histogram of tth values Uses numpy's histogram rather that doing it by hand as above - New feature: weight by something (peak intensity for instance), send true for weight and weights values + New feature: weight by something (peak intensity for instance), send + true for weight and weights values Returns a normalised histogram (should make this a probability *and* For each datapoint, the corresponding histogram weight - + Updated and modernized 2021-02-11 S. 
Merkel """ maxtth = tth.max() mintth = tth.min() - logging.debug("Histogram: maxtth=%f , mintth=%f, bins=%d" % (maxtth, mintth, no_bins)) - if (weight): + logging.debug( + "Histogram: maxtth=%f , mintth=%f, bins=%d" % (maxtth, mintth, no_bins) + ) + if weight: logging.debug("Weighted histogram") - histogram,binedges = np.histogram(tth, bins=no_bins, weights=weights, density=True) + histogram, binedges = np.histogram( + tth, bins=no_bins, weights=weights, density=True + ) else: logging.debug("Un-weighted histogram") - histogram,binedges = np.histogram(tth, bins=no_bins, density=True) - tthbin = 0.5 *(binedges[:-1] + binedges[1:]) - histogram = histogram/histogram.sum() + histogram, binedges = np.histogram(tth, bins=no_bins, density=True) + tthbin = 0.5 * (binedges[:-1] + binedges[1:]) + histogram = histogram / histogram.sum() # histogram value for each peak # len(hpk) = number of peaks - # Tried to use numpy's digitize but failed. Edges are treated differently between np.histogram and np.digitize (both are inclusive in np.histogram) + # Tried to use numpy's digitize but failed. Edges are treated differently between + # np.histogram and np.digitize (both are inclusive in np.histogram) # Tried many combinations and gave up - binsize = (maxtth - mintth) / (no_bins-1) + binsize = (maxtth - mintth) / (no_bins - 1) bins = np.floor((tth - mintth) / binsize).astype(np.int) hpk = np.take(histogram, bins) return tthbin, histogram, hpk @@ -419,12 +457,7 @@ def compute_k_vectors(tth, eta, wvln): return k -def compute_g_vectors(tth, - eta, - omega, - wvln, - wedge=0.0, - chi=0.0): +def compute_g_vectors(tth, eta, omega, wvln, wedge=0.0, chi=0.0): """ Generates spot positions in reciprocal space from twotheta, wavelength, omega and eta @@ -486,18 +519,17 @@ def uncompute_g_vectors(g, wavelength, wedge=0.0, chi=0.0): if wedge == chi == 0: post = None else: - post = gv_general.wedgechi( wedge=wedge, chi=chi ) + post = gv_general.wedgechi(wedge=wedge, chi=chi) omega1, omega2, valid = gv_general.g_to_k( - g, wavelength,axis=[0,0,-1], pre=None, post=post ) + g, wavelength, axis=[0, 0, -1], pre=None, post=post + ) # we know g, omega. Compute k as ... ? if post is None: pre = None else: - pre = gv_general.chiwedge( wedge=wedge, chi=chi ).T - k_one = gv_general.k_to_g( g, omega1, axis=[0,0,1], - pre = pre, post=None) - k_two = gv_general.k_to_g( g, omega2, axis=[0,0,1], - pre = pre, post=None) + pre = gv_general.chiwedge(wedge=wedge, chi=chi).T + k_one = gv_general.k_to_g(g, omega1, axis=[0, 0, 1], pre=pre, post=None) + k_two = gv_general.k_to_g(g, omega2, axis=[0, 0, 1], pre=pre, post=None) # # k[1,:] = -ds*c*sin(eta) # ------ ------------- .... tan(eta) = -k1/k2 @@ -509,7 +541,7 @@ def uncompute_g_vectors(g, wavelength, wedge=0.0, chi=0.0): # ds = np.sqrt(np.sum(g * g, 0)) s = ds * wavelength / 2.0 # sin theta - tth = np.degrees(np.arcsin(s) * 2.) 
* valid + tth = np.degrees(np.arcsin(s) * 2.0) * valid eta1 = np.degrees(eta_one) * valid eta2 = np.degrees(eta_two) * valid omega1 = omega1 * valid @@ -523,15 +555,13 @@ def uncompute_one_g_vector(gv, wavelength, wedge=0.0): assert uncompute_g_vectors(compute_g_vector(tth,eta,omega))==tth,eta,omega """ t, e, o = uncompute_g_vectors( - np.transpose( - np.array([gv, gv])), - wavelength, - wedge=wedge) + np.transpose(np.array([gv, gv])), wavelength, wedge=wedge + ) return t[0], [e[0][0], e[1][0]], [o[0][0], o[1][0]] -def compute_lorentz_factors(tth, eta, omega, wavelength, wedge=0., chi=0.): +def compute_lorentz_factors(tth, eta, omega, wavelength, wedge=0.0, chi=0.0): """ From Kabsch 1988 J. Appl. Cryst. 21 619 @@ -543,7 +573,7 @@ def compute_lorentz_factors(tth, eta, omega, wavelength, wedge=0., chi=0.): """ # So is along +x, the incident beam defines the co-ordinates in ImageD11 # length is in reciprocal space units, 1/lambda - So = [1. / wavelength, 0, 0] + So = [1.0 / wavelength, 0, 0] # # u vector along rotation axis # starts as along z @@ -554,186 +584,209 @@ def compute_lorentz_factors(tth, eta, omega, wavelength, wedge=0., chi=0.): # (-sin(omega) , cos(omega), 0 ) # ( 0 , 0 , 1 ) # - W = [[np.cos(wedge), 0, np.sin(wedge)], - [0, 1, 0], - [-np.sin(wedge), 0, np.cos(wedge)]] + W = [ + [np.cos(wedge), 0, np.sin(wedge)], + [0, 1, 0], + [-np.sin(wedge), 0, np.cos(wedge)], + ] # - C = [[1, 0, 0], - [0, np.cos(chi), np.sin(chi)], - [0, -np.sin(chi), np.cos(chi)]] + C = [[1, 0, 0], [0, np.cos(chi), np.sin(chi)], [0, -np.sin(chi), np.cos(chi)]] u = np.dot(C, np.dot(W, u)) u_x_So = cross_product_2x2(u, So) # if DEBUG: print "axis orientation",u # # S = scattered vectors. Length 1/lambda. - S = np.array([np.cos(np.radians(tth) / 2.) * np.sin(np.radians(eta)) / wavelength, - np.cos(np.radians(tth) / 2.) * np.cos(np.radians(eta)) / wavelength, - np.sin(np.radians(tth) / 2.) / wavelength]) + S = np.array( + [ + np.cos(np.radians(tth) / 2.0) * np.sin(np.radians(eta)) / wavelength, + np.cos(np.radians(tth) / 2.0) * np.cos(np.radians(eta)) / wavelength, + np.sin(np.radians(tth) / 2.0) / wavelength, + ] + ) try: S_dot_u_x_So = np.dot(S, u_x_So) - except: + except Exception: print(S.shape, u_x_So.shape) mod_S = np.sqrt(S * S) mod_So = np.sqrt(So * So) try: lorentz = abs(S_dot_u_x_So) / mod_S / mod_So - except: + except Exception: raise Exception("Please fix this div0 crap in lorentz") return lorentz -def compute_polarisation_factors(args): - """ - From Kabsch 1988 J. Appl. Cryst. 
21 619 - - DIVIDE the intensities by: - = (1 - 2p) [ 1 - (n.S/|S|^2) ] + p { 1 + [S.S_0/(|S||S_0|)^2]^2} - - p = degree of polarisation (sync = 1, tube = 0.5 , mono + tube in between) - or "probability of finding electric field vector in plane having - normal, n" - S = scattered vector - S_0 = incident vector - n = normal to polarisation plane, typically perpendicular to S_0 - - In ImageD11 we normally expect to find: - x axis along the beam - z axis being up, and parallel to the normal n mentioned above - """ - n = [0, 0, 1] - class Ctransform(object): - pnames = ( "y_center", "z_center", "y_size", "z_size", - "distance", "wavelength","omegasign", - "tilt_x","tilt_y","tilt_z", - "o11", "o12", "o21", "o22", - "wedge", "chi" ) - def __init__(self, pars ): - - """ To Do ...: - origin = xyz(0,0), ds = xyz(1,0), df = xyz(0,1) - xyz(s,f) = origin + ds*s + df*f + pnames = ( + "y_center", + "z_center", + "y_size", + "z_size", + "distance", + "wavelength", + "omegasign", + "tilt_x", + "tilt_y", + "tilt_z", + "o11", + "o12", + "o21", + "o22", + "wedge", + "chi", + ) + + def __init__(self, pars): + + """To Do ...: + origin = xyz(0,0), ds = xyz(1,0), df = xyz(0,1) + xyz(s,f) = origin + ds*s + df*f """ - self.pars={} + self.pars = {} for p in self.pnames: - self.pars[p] = pars[p] # copy + self.pars[p] = pars[p] # copy self.reset() - + def reset(self): p = self.pars - self.distance_vec = np.array( (p['distance'],0.,0.)) - self.dmat = detector_rotation_matrix( p['tilt_x'], p['tilt_y'], p['tilt_z']) - self.fmat = np.array( [[ 1, 0, 0], - [ 0, p['o22'], p['o21']], - [ 0, p['o12'], p['o11']]] ) + self.distance_vec = np.array((p["distance"], 0.0, 0.0)) + self.dmat = detector_rotation_matrix(p["tilt_x"], p["tilt_y"], p["tilt_z"]) + self.fmat = np.array( + [[1, 0, 0], [0, p["o22"], p["o21"]], [0, p["o12"], p["o11"]]] + ) self.rmat = np.dot(self.dmat, self.fmat).ravel() - self.cen = np.array( ( p["z_center"], p["y_center"], p["z_size"], p["y_size"] )) - + self.cen = np.array((p["z_center"], p["y_center"], p["z_size"], p["y_size"])) + def sf2xyz(self, sc, fc, tx=0, ty=0, tz=0, out=None): assert len(sc) == len(fc) if out is None: - out = np.empty( (len(sc),3), float) - t = np.array( (tx,ty,tz) ) - cImageD11.compute_xlylzl( sc, fc, self.cen, self.rmat, self.distance_vec, out) + out = np.empty((len(sc), 3), float) + # t = np.array((tx, ty, tz)) + cImageD11.compute_xlylzl(sc, fc, self.cen, self.rmat, self.distance_vec, out) return out - - def xyz2gv(self, xyz, omega, tx=0, ty=0, tz=0, out=None ): + + def xyz2gv(self, xyz, omega, tx=0, ty=0, tz=0, out=None): assert len(omega) == len(xyz) if out is None: - out = np.empty( (len(xyz),3), float) - cImageD11.compute_gv( xyz, - omega, - self.pars['omegasign'], - self.pars['wavelength'], - self.pars['wedge'], - self.pars['chi'], - np.array((tx,ty,tz)), - out) + out = np.empty((len(xyz), 3), float) + cImageD11.compute_gv( + xyz, + omega, + self.pars["omegasign"], + self.pars["wavelength"], + self.pars["wedge"], + self.pars["chi"], + np.array((tx, ty, tz)), + out, + ) return out - - def sf2gv( self, sc, fc, omega, tx=0, ty=0, tz=0, out=None ): - xyz = self.sf2xyz( sc, fc, tx, ty, tz ) - return self.xyz2gv( xyz, omega, tx, ty, tz, out ) - - -class PixelLUT( object ): - - """ A look up table for a 2D image to store pixel-by-pixel values - """ - + + def sf2gv(self, sc, fc, omega, tx=0, ty=0, tz=0, out=None): + xyz = self.sf2xyz(sc, fc, tx, ty, tz) + return self.xyz2gv(xyz, omega, tx, ty, tz, out) + + +class PixelLUT(object): + + """A look up table for a 2D image to store 
pixel-by-pixel values""" + # parameters that can be used to create this LUT - pnames = ( "y_center", "z_center", "y_size", "z_size", - "distance", "wavelength", "omegasign", - "tilt_x","tilt_y","tilt_z", - "o11", "o12", "o21", "o22", - "wedge", "chi", "dxfile", "dyfile", "spline", "shape" ) - - def __init__( self, pars ): + pnames = ( + "y_center", + "z_center", + "y_size", + "z_size", + "distance", + "wavelength", + "omegasign", + "tilt_x", + "tilt_y", + "tilt_z", + "o11", + "o12", + "o21", + "o22", + "wedge", + "chi", + "dxfile", + "dyfile", + "spline", + "shape", + ) + + def __init__(self, pars): """ pars is a dictionary containing the calibration parameters """ self.pars = {} for p in self.pnames: if p in pars: - self.pars[p] = pars[p] # make a copy - if 'dxfile' in pars: + self.pars[p] = pars[p] # make a copy + if "dxfile" in pars: # slow/fast coordinates on image at pixel centers - self.df = fabio.open( pars['dxfile'] ).data - self.ds = fabio.open( pars['dyfile'] ).data - self.shape = s = self.ds.shape # get shape from file - self.pars['shape'] = s - slow, fast = np.mgrid[ 0:s[0], 0:s[1] ] + self.df = fabio.open(pars["dxfile"]).data + self.ds = fabio.open(pars["dyfile"]).data + self.shape = s = self.ds.shape # get shape from file + self.pars["shape"] = s + slow, fast = np.mgrid[0 : s[0], 0 : s[1]] self.sc = slow + self.ds self.fc = fast + self.df - elif 'spline' in pars: # need to test this... + elif "spline" in pars: # need to test this... from ImageD11 import blobcorrector - b = blobcorrector.correctorclass( self.pars['spline'] ) + + b = blobcorrector.correctorclass(self.pars["spline"]) s = int(b.ymax - b.ymin), int(b.xmax - b.xmin) - if 'shape' in self.pars: # override. Probabl - s = self.pars['shape'] + if "shape" in self.pars: # override. 
Probabl
-                s = self.pars['shape']
+                s = self.pars["shape"]
             self.shape = s
-            self.fc, self.sc = b.make_pixel_lut( s )
-            slow, fast = np.mgrid[ 0:s[0], 0:s[1] ]
+            self.fc, self.sc = b.make_pixel_lut(s)
+            slow, fast = np.mgrid[0 : s[0], 0 : s[1]]
             self.df = self.fc - fast
             self.ds = self.sc - slow
         else:
             s = self.shape
-            self.sc, self.fc = np.mgrid[0:s[0], 0:s[1]]
-            self.df = None
+            self.sc, self.fc = np.mgrid[0 : s[0], 0 : s[1]]
+            self.df = None
             self.ds = None
-
-        self.xyz = compute_xyz_lab( (self.sc.ravel(), self.fc.ravel()), **self.pars )
-        self.sinthsq = compute_sinsqth_from_xyz( self.xyz )
+
+        self.xyz = compute_xyz_lab((self.sc.ravel(), self.fc.ravel()), **self.pars)
+        self.sinthsq = compute_sinsqth_from_xyz(self.xyz)
         self.tth, self.eta = compute_tth_eta_from_xyz(self.xyz, None, **self.pars)
         # scattering angles:
-        self.tth, self.eta = compute_tth_eta( (self.sc.ravel(), self.fc.ravel()), **self.pars )
+        self.tth, self.eta = compute_tth_eta(
+            (self.sc.ravel(), self.fc.ravel()), **self.pars
+        )
         # scattering vectors:
-        self.k = compute_k_vectors( self.tth, self.eta, self.pars.get('wavelength') )
+        self.k = compute_k_vectors(self.tth, self.eta, self.pars.get("wavelength"))
         self.sinthsq.shape = s
         self.tth.shape = s
         self.eta.shape = s
         self.k.shape = (3, s[0], s[1])
         self.xyz.shape = (3, s[0], s[1])
-
+
     def spatial(self, sraw, fraw):
-        """ applies a spatial distortion to sraw, fraw (for peak centroids) """
+        """applies a spatial distortion to sraw, fraw (for peak centroids)"""
         if self.df is None:
             return sraw, fraw
         else:
-            si = np.round(sraw.astype(int)).clip( 0, self.shape[1] - 1 )
-            fi = np.round(fraw.astype(int)).clip( 0, self.shape[1] - 1 )
-            sc = sraw + self.ds[ si, fi ]
-            fc = fraw + self.df[ si, fi ]
+            si = np.round(sraw).astype(int).clip(0, self.shape[0] - 1)
+            fi = np.round(fraw).astype(int).clip(0, self.shape[1] - 1)
+            sc = sraw + self.ds[si, fi]
+            fc = fraw + self.df[si, fi]
             return sc, fc
-
+
     def __repr__(self):
-        """ print yourself in a way we can use for eval """
-        sp = "\n".join( [ "%s : %s,"%(repr(p), repr(self.pars[p])) for p in self.pnames
-                          if p in self.pars ] )
-        return "PixelLUT( { %s } )"%(sp)
-
-
-
+        """print yourself in a way we can use for eval"""
+        sp = "\n".join(
+            [
+                "%s : %s," % (repr(p), repr(self.pars[p]))
+                for p in self.pnames
+                if p in self.pars
+            ]
+        )
+        return "PixelLUT( { %s } )" % (sp)
+
+
 if __name__ == "__main__":
     # from indexing import mod_360
     def mod_360(theta, target):
@@ -749,26 +802,32 @@ def mod_360(theta, target):
             diff = theta - target
         return theta

-    tth = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], float)
+    tth = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], float)
     eta = np.array([10, 40, 70, 100, 130, 160, 190, 220, 270, 340], float)
     om = np.array([0, 20, 40, 100, 60, 240, 300, 20, 42, 99], float)

     for wavelength in [0.1, 0.2, 0.3]:
-        for wedge in [-10., -5., 0., 5., 10.]:
+        for wedge in [-10.0, -5.0, 0.0, 5.0, 10.0]:
             print("Wavelength", wavelength, "wedge", wedge)
-            print("tth, eta, omega ... " +\
-                  "tth, eta, omega ... " +\
-                  "tth, eta, omega")
+            print(
+                "tth, eta, omega ... "
+                + "tth, eta, omega ... 
" + + "tth, eta, omega" + ) gv = compute_g_vectors(tth, eta, om, wavelength, wedge) t, e, o = uncompute_g_vectors(gv, wavelength, wedge) for i in range(tth.shape[0]): - print("%9.3f %9.3f %9.3f " % (tth[i], eta[i], om[i]), end=' ') - print("%9.3f %9.3f %9.3f " % (t[i], - mod_360(e[0][i], eta[i]), - mod_360(o[0][i], om[i])), end=' ') - print("%9.3f %9.3f %9.3f " % (t[i], - mod_360(e[1][i], eta[i]), - mod_360(o[1][i], om[i])), end=' ') + print("%9.3f %9.3f %9.3f " % (tth[i], eta[i], om[i]), end=" ") + print( + "%9.3f %9.3f %9.3f " + % (t[i], mod_360(e[0][i], eta[i]), mod_360(o[0][i], om[i])), + end=" ", + ) + print( + "%9.3f %9.3f %9.3f " + % (t[i], mod_360(e[1][i], eta[i]), mod_360(o[1][i], om[i])), + end=" ", + ) # Choose best fitting e_eta1 = mod_360(e[0][i], eta[i]) - eta[i] e_om1 = mod_360(o[0][i], om[i]) - om[i] diff --git a/ImageD11/transformer.py b/ImageD11/transformer.py index 964b75e8..cd795eed 100644 --- a/ImageD11/transformer.py +++ b/ImageD11/transformer.py @@ -1,4 +1,3 @@ - from __future__ import print_function # ImageD11_v1.1 Software for beamline ID11 @@ -21,162 +20,249 @@ import numpy import logging import math -import sys, os from ImageD11 import transform, unitcell, columnfile from ImageD11.parameters import par, parameters PARAMETERS = [ - par("omegasign", 1.0, - helpstring="Sign of the rotation about z " + \ - "(normally +1 for right handed)", - vary=False, - can_vary=False), - par('z_center', 1024.0, - helpstring="Beam centre in vertical, pixels", - vary=True, - can_vary=True, - stepsize=1.0), - par('y_center', 1024.0, - helpstring="Beam centre in horizontal, pixels", - vary=True, - can_vary=True, - stepsize=1.0), - par('distance', 50000.0, - helpstring="sample detector distance, same units as pixel size", - vary=True, - can_vary=True, - stepsize=100.0), - par('z_size', 48.08150, - helpstring="pixel size in vertical, same units distance", - vary=False, - can_vary=True, - stepsize=0.1), # this could actually vary - a bit crazy? - par('y_size', 46.77648, - helpstring="pixel size in horizontal, same units as distance", - vary=False, - can_vary=True, - stepsize=0.1), # this could actually vary - a bit crazy? - par('tilt_z', 0.0, - helpstring="detector tilt, right handed around z", - vary=True, - can_vary=True, - stepsize=transform.radians(0.1)), - par('tilt_y', 0.0, - helpstring="detector tilt, right handed around y", - vary=True, - can_vary=True, - stepsize=transform.radians(0.1)), - par('tilt_x', 0.0, - helpstring="detector tilt, right handed around x", - vary=False, - can_vary=True, - stepsize=transform.radians(0.1)), - par('fit_tolerance', 0.05, - helpstring="tolerance to decide which peaks to use", - vary=False, - can_vary=False), - par('wavelength', 0.155, - helpstring="wavelength, normally angstrom, " + \ - "same as units unit cell ", - vary=False, - can_vary=True, # but you'll be lucky! 
- stepsize=0.00001), - par('wedge', 0.0, - helpstring="wedge, rotation around y under omega", - vary=False, - can_vary=True, - stepsize=transform.radians(0.1)), - par('chi', 0.0, - helpstring="wedge, rotation around x under omega", - vary=False, - can_vary=True, - stepsize=transform.radians(0.1)), - par('cell__a' , 4.1569, - helpstring="unit cell par, same units as wavelength", - vary=False, - can_vary=True, - stepsize=0.01), - par('cell__b' , 4.1569, - helpstring="unit cell par, same units as wavelength", - vary=False, - can_vary=True, - stepsize=0.01), - par('cell__c' , 4.1569, - helpstring="unit cell par, same units as wavelength", - vary=False, - can_vary=True, - stepsize=0.01), - par('cell_alpha' , 90.0, - helpstring="unit cell par, degrees", - vary=False, - can_vary=True, - stepsize=0.01), - par('cell_beta' , 90.0, - helpstring="unit cell par, degrees", - vary=False, - can_vary=True, - stepsize=0.01), - par('cell_gamma' , 90.0, - helpstring="unit cell par, degrees", - vary=False, - can_vary=True, - stepsize=0.01), - par('cell_lattice_[P,A,B,C,I,F,R]', "P", - helpstring="lattice centering type. Try P if you are not sure", - vary=False, - can_vary=False), - par('o11' , 1, - helpstring="detector flip element +1 for frelon & quantix", - vary=False, - can_vary=False), - par('o12' , 0, - helpstring="detector flip element 0 for frelon & quantix", - vary=False, - can_vary=False), - par('o21' , 0, - helpstring="detector flip element 0 for frelon & quantix", - vary=False, - can_vary=False), - par('o22' , -1, - helpstring="detector flip element -1 for frelon & +1 for quantix", - vary=False, - can_vary=False), - par('t_x' , 0, - helpstring="crystal translation, units as distance/pixels", - vary=False, - can_vary=True, - stepsize=1.), - par('t_y' , 0, - helpstring="crystal translation, units as distance/pixels", - vary=False, - can_vary=True, - stepsize=1.), - par('t_z' , 0, - helpstring="crystal translation, units as distance/pixels", - vary=False, - can_vary=True, - stepsize=1.), - par('no_bins', 10000, - helpstring="Number of bins to use in histogram based filters", - vary=False, - can_vary=False), - par('min_bin_prob', 1e-5, - helpstring="Number of bins to use in histogram based filters", - vary=False, - can_vary=False), - par('weight_hist_intensities', False, - helpstring="If True or 1, weight histograms by peak intensities. If False or 0, histogram by number of peaks.", - vary=False, - can_vary=False), - ] - + par( + "omegasign", + 1.0, + helpstring="Sign of the rotation about z " + "(normally +1 for right handed)", + vary=False, + can_vary=False, + ), + par( + "z_center", + 1024.0, + helpstring="Beam centre in vertical, pixels", + vary=True, + can_vary=True, + stepsize=1.0, + ), + par( + "y_center", + 1024.0, + helpstring="Beam centre in horizontal, pixels", + vary=True, + can_vary=True, + stepsize=1.0, + ), + par( + "distance", + 50000.0, + helpstring="sample detector distance, same units as pixel size", + vary=True, + can_vary=True, + stepsize=100.0, + ), + par( + "z_size", + 48.08150, + helpstring="pixel size in vertical, same units distance", + vary=False, + can_vary=True, + stepsize=0.1, + ), # this could actually vary - a bit crazy? + par( + "y_size", + 46.77648, + helpstring="pixel size in horizontal, same units as distance", + vary=False, + can_vary=True, + stepsize=0.1, + ), # this could actually vary - a bit crazy? 
+ par( + "tilt_z", + 0.0, + helpstring="detector tilt, right handed around z", + vary=True, + can_vary=True, + stepsize=transform.radians(0.1), + ), + par( + "tilt_y", + 0.0, + helpstring="detector tilt, right handed around y", + vary=True, + can_vary=True, + stepsize=transform.radians(0.1), + ), + par( + "tilt_x", + 0.0, + helpstring="detector tilt, right handed around x", + vary=False, + can_vary=True, + stepsize=transform.radians(0.1), + ), + par( + "fit_tolerance", + 0.05, + helpstring="tolerance to decide which peaks to use", + vary=False, + can_vary=False, + ), + par( + "wavelength", + 0.155, + helpstring="wavelength, normally angstrom, " + "same as units unit cell ", + vary=False, + can_vary=True, # but you'll be lucky! + stepsize=0.00001, + ), + par( + "wedge", + 0.0, + helpstring="wedge, rotation around y under omega", + vary=False, + can_vary=True, + stepsize=transform.radians(0.1), + ), + par( + "chi", + 0.0, + helpstring="wedge, rotation around x under omega", + vary=False, + can_vary=True, + stepsize=transform.radians(0.1), + ), + par( + "cell__a", + 4.1569, + helpstring="unit cell par, same units as wavelength", + vary=False, + can_vary=True, + stepsize=0.01, + ), + par( + "cell__b", + 4.1569, + helpstring="unit cell par, same units as wavelength", + vary=False, + can_vary=True, + stepsize=0.01, + ), + par( + "cell__c", + 4.1569, + helpstring="unit cell par, same units as wavelength", + vary=False, + can_vary=True, + stepsize=0.01, + ), + par( + "cell_alpha", + 90.0, + helpstring="unit cell par, degrees", + vary=False, + can_vary=True, + stepsize=0.01, + ), + par( + "cell_beta", + 90.0, + helpstring="unit cell par, degrees", + vary=False, + can_vary=True, + stepsize=0.01, + ), + par( + "cell_gamma", + 90.0, + helpstring="unit cell par, degrees", + vary=False, + can_vary=True, + stepsize=0.01, + ), + par( + "cell_lattice_[P,A,B,C,I,F,R]", + "P", + helpstring="lattice centering type. Try P if you are not sure", + vary=False, + can_vary=False, + ), + par( + "o11", + 1, + helpstring="detector flip element +1 for frelon & quantix", + vary=False, + can_vary=False, + ), + par( + "o12", + 0, + helpstring="detector flip element 0 for frelon & quantix", + vary=False, + can_vary=False, + ), + par( + "o21", + 0, + helpstring="detector flip element 0 for frelon & quantix", + vary=False, + can_vary=False, + ), + par( + "o22", + -1, + helpstring="detector flip element -1 for frelon & +1 for quantix", + vary=False, + can_vary=False, + ), + par( + "t_x", + 0, + helpstring="crystal translation, units as distance/pixels", + vary=False, + can_vary=True, + stepsize=1.0, + ), + par( + "t_y", + 0, + helpstring="crystal translation, units as distance/pixels", + vary=False, + can_vary=True, + stepsize=1.0, + ), + par( + "t_z", + 0, + helpstring="crystal translation, units as distance/pixels", + vary=False, + can_vary=True, + stepsize=1.0, + ), + par( + "no_bins", + 10000, + helpstring="Number of bins to use in histogram based filters", + vary=False, + can_vary=False, + ), + par( + "min_bin_prob", + 1e-5, + helpstring="Number of bins to use in histogram based filters", + vary=False, + can_vary=False, + ), + par( + "weight_hist_intensities", + False, + helpstring="If True or 1, weight histograms by peak intensities. 
+        "If False or 0, histogram by number of peaks.",
+        vary=False,
+        can_vary=False,
+    ),
+]
 
 
 class transformer:
     """
-    Handles the algorithmic, fitting and state information for 
+    Handles the algorithmic, fitting and state information for
     fitting parameters to give experimental calibrations
     """
+
     def __init__(self, parfile=None, fltfile=None):
         """
         Nothing is passed in
@@ -206,11 +292,11 @@ def get_variable_list(self):
         return self.parameterobj.get_variable_list()
 
     def getvars(self):
-        """ decide what is refinable """
+        """decide what is refinable"""
         return self.parameterobj.varylist
 
     def setvars(self, varlist):
-        """ set the things to refine """
+        """set the things to refine"""
         self.parameterobj.varylist = varlist
 
     def loadfiltered(self, filename):
@@ -218,42 +304,42 @@ def loadfiltered(self, filename):
         Read in 3D peaks from peaksearch
         """
         self.colfile = columnfile.columnfile(filename)
-        if ( ("sc" in self.colfile.titles) and
-             ("fc" in self.colfile.titles) and
-             ("omega" in self.colfile.titles)):
+        if (
+            ("sc" in self.colfile.titles)
+            and ("fc" in self.colfile.titles)
+            and ("omega" in self.colfile.titles)
+        ):
             self.setxyomcols("sc", "fc", "omega")
-        if (self.colfile.titles[0:3] == ["sc", "fc", "omega"]):
+        if self.colfile.titles[0:3] == ["sc", "fc", "omega"]:
             self.setxyomcols("sc", "fc", "omega")
-        if (self.colfile.titles[0:3] == ["xc", "yc", "omega"]):
+        if self.colfile.titles[0:3] == ["xc", "yc", "omega"]:
             self.setxyomcols("xc", "yc", "omega")
         if "spot3d_id" not in self.colfile.titles:
-            self.colfile.addcolumn(list(range(self.colfile.nrows)),
-                                   "spot3d_id")
+            self.colfile.addcolumn(list(range(self.colfile.nrows)), "spot3d_id")
 
     def setxyomcols(self, xname, yname, omeganame):
         self.xname = xname
         self.yname = yname
         self.omeganame = omeganame
-        logging.warning("titles are %s %s %s" % (self.xname,
-                                                 self.yname,
-                                                 self.omeganame))
+        logging.warning(
+            "titles are %s %s %s" % (self.xname, self.yname, self.omeganame)
+        )
 
     def getcols(self):
         return self.colfile.titles
 
     def loadfileparameters(self, filename):
-        """ Read in beam center etc from file """
+        """Read in beam center etc from file"""
        self.parameterobj.loadparameters(filename)
 
     def saveparameters(self, filename):
-        """ Save beam center etc to file """
+        """Save beam center etc to file"""
         self.parameterobj.saveparameters(filename)
 
     def applyargs(self, args):
-        """ for use with simplex/gof function, alter parameters """
+        """for use with simplex/gof function, alter parameters"""
         self.parameterobj.set_variable_values(args)
 
-
     def getcolumn(self, name):
         """Return the data"""
         return self.colfile.getcolumn(name)
@@ -263,13 +349,13 @@ def addcolumn(self, col, name):
         return self.colfile.addcolumn(col, name)
 
     def compute_tth_eta(self):
-        """ Compute the twotheta and eta for peaks previous read in """
+        """Compute the twotheta and eta for peaks previously read in"""
         if None in [self.xname, self.yname]:
             raise Exception("No peaks loaded")
-        peaks = [self.getcolumn(self.xname),
-                 self.getcolumn(self.yname)]
-        peaks_xyz = transform.compute_xyz_lab(peaks,
-                                              **self.parameterobj.get_parameters())
+        peaks = [self.getcolumn(self.xname), self.getcolumn(self.yname)]
+        peaks_xyz = transform.compute_xyz_lab(
+            peaks, **self.parameterobj.get_parameters()
+        )
         # Store these in the columnfile
         self.addcolumn(peaks_xyz[0], "xl")
         self.addcolumn(peaks_xyz[1], "yl")
@@ -277,38 +363,43 @@ def compute_tth_eta(self):
         # Get the Omega name?
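+        # fetch the rotation angle (omega, in degrees) for each peak from the column named by setxyomcols()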
         omega = self.getcolumn(self.omeganame)
         tth, eta = transform.compute_tth_eta_from_xyz(
-            peaks_xyz,
-            omega,
-            **self.parameterobj.get_parameters())
-        self.addcolumn(tth , "tth")
-        self.addcolumn(eta , "eta")
+            peaks_xyz, omega, **self.parameterobj.get_parameters()
+        )
+        self.addcolumn(tth, "tth")
+        self.addcolumn(eta, "eta")
         return tth, eta
 
     def compute_histo(self, colname):
-        """ Compute the histogram over twotheta for peaks previous read in
+        """Compute the histogram over twotheta for peaks previously read in
         Filtering is moved to a separate function
-        
+
         colname is most often "tth"
-        
+
         other parameters are set in the parameter object
            no_bins = number of bins
            weight_hist_intensities: True or False
                False: histogram by number of measured peaks
-               True: weight by peak intensities 
+               True: weight by peak intensities
         """
         if colname not in self.colfile.titles:
             raise Exception("Cannot find column " + colname)
         weight = self.parameterobj.get("weight_hist_intensities")
-        if (weight):
-            bins, hist, hpk = transform.compute_tth_histo(self.getcolumn(colname), weight = True, weights = self.getcolumn("sum_intensity"), **self.parameterobj.get_parameters())
+        if weight:
+            bins, hist, hpk = transform.compute_tth_histo(
+                self.getcolumn(colname),
+                weight=True,
+                weights=self.getcolumn("sum_intensity"),
+                **self.parameterobj.get_parameters()
+            )
         else:
-            bins, hist, hpk = transform.compute_tth_histo(self.getcolumn(colname),
-                                                          **self.parameterobj.get_parameters())
+            bins, hist, hpk = transform.compute_tth_histo(
+                self.getcolumn(colname), **self.parameterobj.get_parameters()
+            )
         self.addcolumn(hpk, colname + "_hist_prob")
         return bins, hist
 
     def compute_tth_histo(self):
-        """ Give hardwire access to tth """
+        """Give hardwired access to tth"""
         if "tth" not in self.colfile.titles:
             self.compute_tth_eta()
         return self.compute_histo("tth")
@@ -320,11 +411,9 @@ def filter_min(self, col, minval):
         if "tth_hist_prob" not in self.colfile.titles:
             self.compute_tth_histo()
         mask = self.colfile.getcolumn("tth_hist_prob") > minval
-        logging.info("Number of peaks before filtering = %d" % (
-            self.colfile.nrows))
+        logging.info("Number of peaks before filtering = %d" % (self.colfile.nrows))
         self.colfile.filter(mask)
-        logging.info("Number of peaks after filtering = %d" % (
-            self.colfile.nrows))
+        logging.info("Number of peaks after filtering = %d" % (self.colfile.nrows))
 
     def tth_entropy(self):
         """
@@ -341,70 +430,77 @@ def tth_entropy(self):
         return entropy
 
     def gof(self, args):
-        """ Compute how good is the fit of obs/calc peak positions in tth """
+        """Compute the goodness of fit of obs/calc peak positions in tth"""
         self.applyargs(args)
-        # 
+        #
         if self.update_fitds:
-            cell = unitcell.unitcell_from_parameters( self.parameterobj )
+            cell = unitcell.unitcell_from_parameters(self.parameterobj)
         # Here, pars is a dictionary of name/value pairs to pass to compute_tth_eta
         tth, eta = self.compute_tth_eta()
         w = self.parameterobj.get("wavelength")
-        gof = 0.
+ gof = 0.0 npeaks = 0 - for i in range(len(self.tthc)):# (twotheta_rad_cell.shape[0]): + for i in range(len(self.tthc)): # (twotheta_rad_cell.shape[0]): if self.update_fitds: - b4 = self.fitds[i] - self.fitds[i] = cell.ds( self.fithkls[i] ) + # b4 = self.fitds[i] + self.fitds[i] = cell.ds(self.fithkls[i]) self.tthc[i] = transform.degrees(math.asin(self.fitds[i] * w / 2) * 2) diff = numpy.take(tth, self.indices[i]) - self.tthc[i] -# print "peak",i,"diff",maximum.reduce(diff),minimum.reduce(diff) + # print "peak",i,"diff",maximum.reduce(diff),minimum.reduce(diff) gof = gof + numpy.sum(diff * diff) npeaks = npeaks + len(diff) gof = gof / npeaks return gof * 1e6 def fit(self, tthmin=0, tthmax=180): - """ Apply simplex to improve fit of obs/calc tth """ + """Apply simplex to improve fit of obs/calc tth""" tthmin = float(tthmin) tthmax = float(tthmax) from . import simplex - if self.theoryds == None: + + if self.theoryds is None: self.addcellpeaks() # Assign observed peaks to rings self.wavelength = None self.indices = [] # which peaks used - self.tthc = [] # computed two theta values - self.fitds = [] # hmm? + self.tthc = [] # computed two theta values + self.fitds = [] # hmm? self.fithkls = [] - self.fit_tolerance = 1. + self.fit_tolerance = 1.0 pars = self.parameterobj.get_parameters() - w = float(pars['wavelength']) + w = float(pars["wavelength"]) self.wavelength = w - self.fit_tolerance = float(pars['fit_tolerance']) - print("Tolerance for assigning peaks to rings", \ - self.fit_tolerance, ", min tth", tthmin, ", max tth", tthmax) + self.fit_tolerance = float(pars["fit_tolerance"]) + print( + "Tolerance for assigning peaks to rings", + self.fit_tolerance, + ", min tth", + tthmin, + ", max tth", + tthmax, + ) tth, eta = self.compute_tth_eta() for i in range(len(self.theoryds)): dsc = self.theoryds[i] - tthcalc = math.asin(dsc * w / 2) * 360. 
/ math.pi # degrees
+            tthcalc = math.asin(dsc * w / 2) * 360.0 / math.pi  # degrees
             if tthcalc > tthmax:
                 break
             elif tthcalc < tthmin:
                 continue
-            logicals = numpy.logical_and(numpy.greater(tth,
-                                                       tthcalc - self.fit_tolerance),
-                                         numpy.less(tth ,
-                                                    tthcalc + self.fit_tolerance))
+            logicals = numpy.logical_and(
+                numpy.greater(tth, tthcalc - self.fit_tolerance),
+                numpy.less(tth, tthcalc + self.fit_tolerance),
+            )
             if sum(logicals) > 0:
                 self.tthc.append(tthcalc)
                 self.fitds.append(dsc)
-                self.fithkls.append( self.unitcell.ringhkls[dsc][0] )
+                self.fithkls.append(self.unitcell.ringhkls[dsc][0])
                 ind = numpy.compress(logicals, list(range(len(tth))))
                 self.indices.append(ind)
         self.update_fitds = False
         for p in self.parameterobj.varylist:
-            if p.startswith('cell'):
+            if p.startswith("cell"):
                 self.update_fitds = True
         guess = self.parameterobj.get_variable_values()
         inc = self.parameterobj.get_variable_stepsizes()
@@ -425,8 +521,6 @@ def fit(self, tthmin=0, tthmax=180):
         if self.update_fitds:
             self.addcellpeaks()
 
-
-
     def addcellpeaks(self, limit=None):
         """
         Adds unit cell predicted peaks for fitting against
@@ -442,9 +536,18 @@ def addcellpeaks(self, limit=None):
         # in microns of the unit cell peaks
         #
         pars = self.parameterobj.get_parameters()
-        cell = [ pars[name] for name in ['cell__a', 'cell__b', 'cell__c',
-                                         'cell_alpha', 'cell_beta', 'cell_gamma']]
-        lattice = pars['cell_lattice_[P,A,B,C,I,F,R]']
+        cell = [
+            pars[name]
+            for name in [
+                "cell__a",
+                "cell__b",
+                "cell__c",
+                "cell_alpha",
+                "cell_beta",
+                "cell_gamma",
+            ]
+        ]
+        lattice = pars["cell_lattice_[P,A,B,C,I,F,R]"]
         if "tth" not in self.colfile.titles:
             self.compute_tth_eta()
         # Find last peak in radius
@@ -452,30 +555,25 @@ def addcellpeaks(self, limit=None):
             highest = numpy.maximum.reduce(self.getcolumn("tth"))
         else:
             highest = limit
-        w = pars['wavelength']
-        ds = 2 * numpy.sin(transform.radians(highest) / 2.) / w
+        w = pars["wavelength"]
+        ds = 2 * numpy.sin(transform.radians(highest) / 2.0) / w
         self.dslimit = ds
         self.unitcell = unitcell.unitcell(cell, lattice)
         # If the space group is provided use xfab to generate unique hkls
-        if 'cell_sg' in pars:
-            self.theorypeaks = self.unitcell.gethkls_xfab(ds, pars['cell_sg'])
+        if "cell_sg" in pars:
+            self.theorypeaks = self.unitcell.gethkls_xfab(ds, pars["cell_sg"])
             tths = []
             self.theoryds = []
             for i in range(len(self.theorypeaks)):
-                tths.append(2 * numpy.arcsin(w * self.theorypeaks[i][0] / 2.))
-                self.theoryds.append( self.theorypeaks[i][0] )
+                tths.append(2 * numpy.arcsin(w * self.theorypeaks[i][0] / 2.0))
+                self.theoryds.append(self.theorypeaks[i][0])
         else:
-            # HO: I have removed this part as it seems redundant ringds also calls gethkls
-            # JPW: It was not redundant. theorypeaks is not defined anywhere else and you
-            # can't write a g-vector file without it.
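+            # theorypeaks is kept here: savegv() writes its (ds, (h, k, l)) entries into the gve file header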
self.theorypeaks = self.unitcell.gethkls(ds) self.unitcell.makerings(ds) self.theoryds = self.unitcell.ringds - tths = [numpy.arcsin(w * dstar / 2) * 2 - for dstar in self.unitcell.ringds] + tths = [numpy.arcsin(w * dstar / 2) * 2 for dstar in self.unitcell.ringds] self.theorytth = transform.degrees(numpy.array(tths)) - def computegv(self): """ Using tth, eta and omega angles, compute x,y,z of spot @@ -492,9 +590,10 @@ def computegv(self): self.getcolumn("tth"), self.getcolumn("eta"), self.getcolumn("omega") * om_sgn, - self.parameterobj.get('wavelength'), - wedge=self.parameterobj.get('wedge'), - chi=self.parameterobj.get('chi')) + self.parameterobj.get("wavelength"), + wedge=self.parameterobj.get("wedge"), + chi=self.parameterobj.get("chi"), + ) self.addcolumn(gv[0], "gx") self.addcolumn(gv[1], "gy") @@ -516,15 +615,15 @@ def savegv(self, filename): Use crappy .ascii format from previous for now (testing) """ # self.parameterobj.update_other(self) - self.colfile.updateGeometry( self.parameterobj ) + self.colfile.updateGeometry(self.parameterobj) if self.unitcell is None: self.addcellpeaks() f = open(filename, "w") f.write(self.unitcell.tostring()) f.write("\n") pars = self.parameterobj.get_parameters() - f.write("# wavelength = %f\n" % (float(pars['wavelength']))) - f.write("# wedge = %f\n" % (float(pars['wedge']))) + f.write("# wavelength = %f\n" % (float(pars["wavelength"]))) + f.write("# wedge = %f\n" % (float(pars["wedge"]))) # Handle the axis direction somehow f.write("# axis %f %f %f\n" % tuple(self.getaxis())) # Put a copy of all the parameters in the gve file @@ -534,10 +633,9 @@ def savegv(self, filename): f.write("# %s = %s \n" % (k, pars[k])) f.write("# ds h k l\n") for peak in self.theorypeaks: - f.write("%10.7f %4d %4d %4d\n" % (peak[0], - peak[1][0], - peak[1][1], - peak[1][2])) + f.write( + "%10.7f %4d %4d %4d\n" % (peak[0], peak[1][0], peak[1][1], peak[1][2]) + ) tth = self.getcolumn("tth") ome = self.getcolumn("omega") eta = self.getcolumn("eta") @@ -554,13 +652,25 @@ def savegv(self, filename): f.write("# gx gy gz xc yc ds eta omega spot3d_id xl yl zl\n") print(numpy.maximum.reduce(ome), numpy.minimum.reduce(ome)) ds = 2 * numpy.sin(transform.radians(tth / 2)) / pars["wavelength"] - fmt = "%f "*8 + "%d " + "%f "*3 + "\n" + fmt = "%f " * 8 + "%d " + "%f " * 3 + "\n" for i in order: - f.write(fmt % (gx[i], gy[i], gz[i], - x[i], y[i], - ds[i], eta[i], ome[i], - spot3d_id[i], - xl[i], yl[i], zl[i])) + f.write( + fmt + % ( + gx[i], + gy[i], + gz[i], + x[i], + y[i], + ds[i], + eta[i], + ome[i], + spot3d_id[i], + xl[i], + yl[i], + zl[i], + ) + ) f.close() def write_colfile(self, filename): @@ -572,22 +682,21 @@ def write_colfile(self, filename): def write_graindex_gv(self, filename): from ImageD11 import write_graindex_gv - if ("gx" not in self.colfile.titles): + + if "gx" not in self.colfile.titles: self.computegv() - gv = [ self.getcolumn("gx"), - self.getcolumn("gy"), - self.getcolumn("gz") ] + gv = [self.getcolumn("gx"), self.getcolumn("gy"), self.getcolumn("gz")] - if ("sum_intensity" in self.colfile.titles): + if "sum_intensity" in self.colfile.titles: ints = self.getcolumn("sum_intensity") - elif ("avg_intensity" in self.colfile.titles) and \ - ("Number_of_pixels" in self.colfile.titles): - ints = self.getcolumn("sum_intensity") * \ - self.getcolumn("Number_of_pixels") - elif ("avg_intensity" in self.colfile.titles) and \ - ("npixels" in self.colfile.titles): - ints = self.getcolumn("sum_intensity") * \ - self.getcolumn("npixels") + elif ("avg_intensity" in 
self.colfile.titles) and (
+            "Number_of_pixels" in self.colfile.titles
+        ):
+            ints = self.getcolumn("sum_intensity") * self.getcolumn("Number_of_pixels")
+        elif ("avg_intensity" in self.colfile.titles) and (
+            "npixels" in self.colfile.titles
+        ):
+            ints = self.getcolumn("sum_intensity") * self.getcolumn("npixels")
         else:
             ints = numpy.zeros(self.colfile.nrows)
@@ -597,18 +706,20 @@ def write_graindex_gv(self, filename):
         else:
             om_sgn = 1.0
 
-        write_graindex_gv.write_graindex_gv(filename,
-                                            numpy.array(gv),
-                                            self.getcolumn("tth"),
-                                            self.getcolumn("eta"),
-                                            self.getcolumn("omega") * om_sgn,
-                                            ints,
-                                            self.unitcell)
-
+        write_graindex_gv.write_graindex_gv(
+            filename,
+            numpy.array(gv),
+            self.getcolumn("tth"),
+            self.getcolumn("eta"),
+            self.getcolumn("omega") * om_sgn,
+            ints,
+            self.unitcell,
+        )
 
     def write_pyFAI(self, filename, tthmin=0, tthmax=180):
         """
-        Write file for Jerome Kieffer's pyFAI fitting routine to use and run the refinment...
+        Write file for Jerome Kieffer's pyFAI fitting routine to use and run
+        the refinement...
         """
         try:
             import pyFAI
@@ -623,16 +734,16 @@ def write_pyFAI(self, filename, tthmin=0, tthmax=180):
         # Assign observed peaks to rings
         self.wavelength = None
         self.indices = []  # which peaks used
-        self.tthc = [] # computed two theta values
+        self.tthc = []  # computed two theta values
         pars = self.parameterobj.get_parameters()
-        w = float(pars['wavelength'])
-        tol = float(pars['fit_tolerance'])
+        w = float(pars["wavelength"])
+        tol = float(pars["fit_tolerance"])
         tth, eta = self.compute_tth_eta()
         # Loop over calc peak positions
-        z = self.getcolumn('s_raw')
-        y = self.getcolumn('f_raw')
+        z = self.getcolumn("s_raw")
+        y = self.getcolumn("f_raw")
         for i, dsc in enumerate(self.theoryds):
-            tthcalc = math.asin(dsc * w / 2) * 360. / math.pi # degrees
+            tthcalc = math.asin(dsc * w / 2) * 360.0 / math.pi  # degrees
             if tthcalc > tthmax:
                 break
             elif tthcalc < tthmin:
@@ -642,27 +753,26 @@ def write_pyFAI(self, filename, tthmin=0, tthmax=180):
             controlpoints.append_2theta_deg(list(zip(z[ind], y[ind])), tthcalc)
         controlpoints.save(filename)
-
         # There is no spline, hence return
         return
 
-        #if len(controlpoints) == 0:
+        # if len(controlpoints) == 0:
         #    logging.error("The number of control points found is null !!! skipping optimization ")
         #    return
         #
-        #if "spline" in pars:
+        # if "spline" in pars:
         #    print("with spline")
         #    #TODO: where is the spline file stored ???
         #    geoRef = pyFAI.geometryRefinement.GeometryRefinement(controlpoints.getList(), dist=0.1, splineFile=self.splineFile)
-        #else:
+        # else:
        #    geoRef = pyFAI.geometryRefinement.GeometryRefinement(controlpoints.getList(), dist=0.1, pixel1=pars["z_size"] * 1e-6, pixel2=pars["y_size"] * 1e-6)
-        #geoRef.wavelength = w * 1e-10
-        #previous = sys.maxint
-        #while previous > geoRef.chi2():
+        # geoRef.wavelength = w * 1e-10
+        # previous = sys.maxint
+        # while previous > geoRef.chi2():
         #    previous = geoRef.chi2()
         #    geoRef.refine2(1000000)
         #    print geoRef
-        #geoRef.save(os.path.splitext(filename)[0] + ".poni")
+        # geoRef.save(os.path.splitext(filename)[0] + ".poni")
 
     def save_tth_his(self, filename, bins, hist):
         """
@@ -675,6 +785,6 @@ def save_tth_his(self, filename, bins, hist):
         f.write("# Peaks: %s\n" % self.colfile.filename)
         f.write("# N. bins: %d\n" % len(bins))
         f.write("# 2tth intensity\n")
-        for i in range(0,len(hist)):
+        for i in range(0, len(hist)):
             f.write("%10.7f %.7g \n" % (bins[i], hist[i]))
         f.close()
diff --git a/ImageD11/unitcell.py b/ImageD11/unitcell.py
index 6ebc5ad3..fbd4d58f 100644
--- a/ImageD11/unitcell.py
+++ b/ImageD11/unitcell.py
@@ -1,12 +1,8 @@
-
-
 from __future__ import print_function, division
 
 ## Automatically adapted for numpy.oldnumeric Sep 06, 2007 by alter_code1.py
 
-
-
 # ImageD11_v0.4 Software for beamline ID11
 # Copyright (C) 2005 Jon Wright
 #
@@ -32,34 +28,36 @@
 import numpy as np
 from numpy.linalg import inv
 from ImageD11 import cImageD11
-from xfab import tools, sg
-
-
+from xfab import tools
 
 
 def radians(x):
-    return x*math.pi/180.
+    return x * math.pi / 180.0
 
 
-def degrees(x):
-    return x*180./math.pi
+def degrees(x):
+    return x * 180.0 / math.pi
 
 
-def cross(a,b):
+def cross(a, b):
     """
     a x b has length |a||b|sin(theta)
     """
-    return np.array([ a[1]*b[2]-a[2]*b[1] ,
-                      a[2]*b[0]-b[2]*a[0] ,
-                      a[0]*b[1]-b[0]*a[1] ],float)
-
+    return np.array(
+        [
+            a[1] * b[2] - a[2] * b[1],
+            a[2] * b[0] - b[2] * a[0],
+            a[0] * b[1] - b[0] * a[1],
+        ],
+        float,
+    )
 
 
 def norm2(a):
     """
     Compute the unit 2 norm
     """
-    return np.sqrt(np.dot(a,a))
+    return np.sqrt(np.dot(a, a))
 
 
 def unit(a):
@@ -67,77 +65,79 @@ def unit(a):
     Normalise vector a to unit length
     """
     try:
-        return a/norm2(a)
+        return a / norm2(a)
     except:
-        logging.error("cannot normalise to unit length a=%s"%(str(a)))
+        logging.error("cannot normalise to unit length a=%s" % (str(a)))
         raise
 
 
 # Systematic absences
 
-def P(h,k,l):
+
+def P(h, k, l):
     return False
 
-def A(h,k,l):
-    return (k+l)%2 != 0
 
-def B(h,k,l):
-    return (h+l)%2 != 0
+def A(h, k, l):
+    return (k + l) % 2 != 0
+
 
-def C(h,k,l):
-    return (h+k)%2 != 0
+def B(h, k, l):
+    return (h + l) % 2 != 0
 
-def I(h,k,l):
-    return (h+k+l)%2 != 0
 
-def F(h,k,l):
-    return (h+k)%2!=0 or (h+l)%2!=0 or (k+l)%2!=0
+def C(h, k, l):
+    return (h + k) % 2 != 0
 
-def R(h,k,l):
-    return (-h+k+l)%3 != 0
 
+def I(h, k, l):
+    return (h + k + l) % 2 != 0
 
-outif = {
-    "P" : P ,
-    "A" : I ,
-    "B" : B ,
-    "C" : C ,
-    "I" : I ,
-    "F" : F ,
-    "R" : R}
 
-def orient_BL( B, h1, h2, g1, g2):
+def F(h, k, l):
+    return (h + k) % 2 != 0 or (h + l) % 2 != 0 or (k + l) % 2 != 0
+
+
+def R(h, k, l):
+    return (-h + k + l) % 3 != 0
+
+
+outif = {"P": P, "A": A, "B": B, "C": C, "I": I, "F": F, "R": R}
+
+
+def orient_BL(B, h1, h2, g1, g2):
    """Algorithm was intended to follow this one using 2 indexed reflections.
    W. R. Busing and H. A. Levy, Acta Cryst. (1967). 22, 457
    """
-    h1c=np.dot(B,h1) # cartesian H1
-    h2c=np.dot(B,h2) # cartesian H2
-    t1c=unit(h1c) # unit vector along H1
-    t3c=unit(np.cross(h1c,h2c))
-    t2c=unit(np.cross(h1c,t3c))
-    t1g=unit(g1)
-    t3g=unit(np.cross(g1,g2))
-    t2g=unit(np.cross(g1,t3g))
-    T_g = np.transpose(np.array([t1g,t2g,t3g])) # Array are stored by rows and
-    T_c = np.transpose(np.array([t1c,t2c,t3c])) # these are columns
-    U=np.dot(T_g , np.linalg.inv(T_c))
-    UB=np.dot(U,B)
-    UBI=np.linalg.inv(UB)
+    h1c = np.dot(B, h1)  # cartesian H1
+    h2c = np.dot(B, h2)  # cartesian H2
+    t1c = unit(h1c)  # unit vector along H1
+    t3c = unit(np.cross(h1c, h2c))
+    t2c = unit(np.cross(h1c, t3c))
+    t1g = unit(g1)
+    t3g = unit(np.cross(g1, g2))
+    t2g = unit(np.cross(g1, t3g))
+    T_g = np.transpose(np.array([t1g, t2g, t3g]))  # Arrays are stored by rows and
+    T_c = np.transpose(np.array([t1c, t2c, t3c]))  # these are columns
+    U = np.dot(T_g, np.linalg.inv(T_c))
+    UB = np.dot(U, B)
+    UBI = np.linalg.inv(UB)
     return UBI, UB
 
+
 def cosangles_many(ha, hb, gi):
-    """ Finds the cosines of angles between two lists of hkls in
-    reciprocal metric gi """
-    assert len(ha[0])==3 and len(hb[0])==3
+    """Finds the cosines of angles between two lists of hkls in
+    reciprocal metric gi"""
+    assert len(ha[0]) == 3 and len(hb[0]) == 3
     na = len(ha)
     nb = len(hb)
     hag = np.dot(ha, gi)
     hbg = np.dot(hb, gi)
-    hagha = np.sqrt((hag*ha).sum(axis=1))
-    hbghb = np.sqrt((hbg*hb).sum(axis=1))
+    hagha = np.sqrt((hag * ha).sum(axis=1))
+    hbghb = np.sqrt((hbg * hb).sum(axis=1))
     haghb = np.dot(ha, hbg.T)
-    ca = haghb / np.outer( hagha, hbghb )
+    ca = haghb / np.outer(hagha, hbghb)
     assert ca.shape == (na, nb)
     return ca
@@ -149,115 +149,141 @@ def cellfromstring(s):
     try:
         symm = items[6]
     except IndexError:
-        symm = 'P'
+        symm = "P"
     return unitcell(latt, symm)
 
+
 class unitcell:
     # Unit cell stuff
     # Generate a list of peaks from a unit cell
-    def __init__(self, lattice_parameters, symmetry = "P", verbose = 0 ):
+    def __init__(self, lattice_parameters, symmetry="P", verbose=0):
         """
         Unit cell class
         supply a list (tuple etc) of a,b,c,alpha,beta,gamma
         optionally a symmetry, one of "P","A","B","C","I","F","R"
         """
         self.lattice_parameters = np.array(lattice_parameters)
-        if self.lattice_parameters.shape[0]!=6:
-            raise Exception("You must supply 6 lattice parameters\n"+\
-                            "     a,b,c,alpha,beta,gamma")
+        if self.lattice_parameters.shape[0] != 6:
+            raise Exception(
+                "You must supply 6 lattice parameters\n" + "     a,b,c,alpha,beta,gamma"
+            )
         self.symmetry = symmetry
-        if self.symmetry not in ["P","A","B","C","I","F","R"]:
-            raise Exception("Your symmetry "+self.symmetry+\
-                            " was not recognised")
+        if self.symmetry not in ["P", "A", "B", "C", "I", "F", "R"]:
+            raise Exception("Your symmetry " + self.symmetry + " was not recognised")
         # assigning a function here!
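+        # self.absent(h, k, l) returns True when a reflection is systematically absent for this centring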
         self.absent = outif[self.symmetry]
         a = self.lattice_parameters[0]
         b = self.lattice_parameters[1]
         c = self.lattice_parameters[2]
-        self.alpha=radians(self.lattice_parameters[3])
-        ca= math.cos(radians(self.lattice_parameters[3]))
-        cb= math.cos(radians(self.lattice_parameters[4]))
-        cg= math.cos(radians(self.lattice_parameters[5]))
-        if verbose==1: print("Unit cell",self.lattice_parameters)
-        self.g = np.array( [[ a*a    , a*b*cg, a*c*cb ],
-                            [ a*b*cg , b*b   , b*c*ca ],
-                            [ a*c*cb , b*c*ca, c*c    ]],float)
-        if verbose==1: print("Metric tensor\n",self.g)
+        self.alpha = radians(self.lattice_parameters[3])
+        ca = math.cos(radians(self.lattice_parameters[3]))
+        cb = math.cos(radians(self.lattice_parameters[4]))
+        cg = math.cos(radians(self.lattice_parameters[5]))
+        if verbose == 1:
+            print("Unit cell", self.lattice_parameters)
+        self.g = np.array(
+            [
+                [a * a, a * b * cg, a * c * cb],
+                [a * b * cg, b * b, b * c * ca],
+                [a * c * cb, b * c * ca, c * c],
+            ],
+            float,
+        )
+        if verbose == 1:
+            print("Metric tensor\n", self.g)
         try:
             self.gi = inv(self.g)
         except:
-            raise Exception("Unit cell was degenerate, could not determine"+\
-                            "reciprocal metric tensor")
-        if verbose==1: print("Reciprocal Metric tensor\n",self.gi)
-        self.astar=np.sqrt(self.gi[0,0])
-        self.bstar=np.sqrt(self.gi[1,1])
-        self.cstar=np.sqrt(self.gi[2,2])
-
-        self.alphas=degrees(math.acos(self.gi[1,2]/self.bstar/self.cstar))
-        self.betas =degrees(math.acos(self.gi[0,2]/self.astar/self.cstar))
-        self.gammas=degrees(math.acos(self.gi[0,1]/self.astar/self.bstar))
-        if verbose==1: print("Reciprocal cell")
-        if verbose==1:
-            print(self.astar, self.bstar, self.cstar, \
-                  self.alphas, self.betas, self.gammas)
+            raise Exception(
+                "Unit cell was degenerate, could not determine "
+                + "reciprocal metric tensor"
+            )
+        if verbose == 1:
+            print("Reciprocal Metric tensor\n", self.gi)
+        self.astar = np.sqrt(self.gi[0, 0])
+        self.bstar = np.sqrt(self.gi[1, 1])
+        self.cstar = np.sqrt(self.gi[2, 2])
+
+        self.alphas = degrees(math.acos(self.gi[1, 2] / self.bstar / self.cstar))
+        self.betas = degrees(math.acos(self.gi[0, 2] / self.astar / self.cstar))
+        self.gammas = degrees(math.acos(self.gi[0, 1] / self.astar / self.bstar))
+        if verbose == 1:
+            print("Reciprocal cell")
+        if verbose == 1:
+            print(
+                self.astar, self.bstar, self.cstar, self.alphas, self.betas, self.gammas
+            )
         # Equation 3 from Busing and Levy
-        self.B = np.array (
-            [ [ self.astar ,
-                self.bstar*math.cos(radians(self.gammas)) ,
-                self.cstar*math.cos(radians(self.betas)) ] ,
-              [ 0 ,
-                self.bstar*math.sin(radians(self.gammas)) ,
-                -self.cstar*math.sin(radians(self.betas))*ca ],
-              [ 0 , 0 ,
-                1./c ] ] , float)
-        if verbose == 1: print(self.B)
+        self.B = np.array(
+            [
+                [
+                    self.astar,
+                    self.bstar * math.cos(radians(self.gammas)),
+                    self.cstar * math.cos(radians(self.betas)),
+                ],
+                [
+                    0,
+                    self.bstar * math.sin(radians(self.gammas)),
+                    -self.cstar * math.sin(radians(self.betas)) * ca,
+                ],
+                [0, 0, 1.0 / c],
+            ],
+            float,
+        )
         if verbose == 1:
-            print(np.dot( np.transpose(self.B),
-                          self.B)-self.gi) # this should be zero
+            print(self.B)
+        if verbose == 1:
+            print(np.dot(np.transpose(self.B), self.B) - self.gi)  # this should be zero
         self.hkls = None
         self.peaks = None
         self.limit = 0
         self.ringtol = 0.001
         # used for caching
-        self.anglehkl_cache = { "ringtol" : self.ringtol ,
-                                "B" : self.B,
-                                "BI" : np.linalg.inv(self.B) }
-
+        self.anglehkl_cache = {
+            "ringtol": self.ringtol,
+            "B": self.B,
+            "BI": np.linalg.inv(self.B),
+        }
 
     def tostring(self):
         """
         Write out a line containing unit cell
information """ - return "%f %f %f %f %f %f %s"%(self.lattice_parameters[0], - self.lattice_parameters[1], - self.lattice_parameters[2], - self.lattice_parameters[3], - self.lattice_parameters[4], - self.lattice_parameters[5], - self.symmetry) - - - def gethkls_xfab(self,dsmax,spg): + return "%f %f %f %f %f %f %s" % ( + self.lattice_parameters[0], + self.lattice_parameters[1], + self.lattice_parameters[2], + self.lattice_parameters[3], + self.lattice_parameters[4], + self.lattice_parameters[5], + self.symmetry, + ) + + def gethkls_xfab(self, dsmax, spg): """ Generate hkl list Argument dsmax is the d* limit (eg 1/d) Argument spg is the space group name, e.g. 'R3-c' """ - stl_max = dsmax/2. - raw_peaks = tools.genhkl_all(self.lattice_parameters, - 0 , stl_max, - sgname=spg, - output_stl=True) + stl_max = dsmax / 2.0 + raw_peaks = tools.genhkl_all( + self.lattice_parameters, 0, stl_max, sgname=spg, output_stl=True + ) peaks = [] for i in range(len(raw_peaks)): - peaks.append([raw_peaks[i,3]*2, - (raw_peaks[i,0],raw_peaks[i,1],raw_peaks[i,2])]) + peaks.append( + [ + raw_peaks[i, 3] * 2, + (raw_peaks[i, 0], raw_peaks[i, 1], raw_peaks[i, 2]), + ] + ) self.peaks = peaks self.limit = dsmax return peaks - def gethkls(self,dsmax): + def gethkls(self, dsmax): """ Generate hkl list Argument dsmax is the d* limit (eg 1/d) @@ -267,72 +293,71 @@ def gethkls(self,dsmax): """ if dsmax == self.limit and self.peaks is not None: return self.peaks - h=k=0 - l=1 # skip 0,0,0 - hs=ks=ls=1 - b=0 - peaks=[] - while abs(h)<200: # H - while abs(k)<200: # K - while abs(l)<200: #L - ds=self.ds([h,k,l]) + h = k = 0 + l = 1 # skip 0,0,0 + hs = ks = ls = 1 + b = 0 + peaks = [] + while abs(h) < 200: # H + while abs(k) < 200: # K + while abs(l) < 200: # L + ds = self.ds([h, k, l]) if ds < dsmax: - if not self.absent(h,k,l): - peaks.append([ds,(h,k,l)]) + if not self.absent(h, k, l): + peaks.append([ds, (h, k, l)]) else: pass - b=0 + b = 0 else: - if ls==1: - ls=-1 - l=0 + if ls == 1: + ls = -1 + l = 0 else: - ls=1 - l=0 - b=b+1 + ls = 1 + l = 0 + b = b + 1 break - l=l+ls - k=k+ks + l = l + ls + k = k + ks # l is always zero here - if b>1: - if ks==1: - ks=-1 - k=-1 + if b > 1: + if ks == 1: + ks = -1 + k = -1 else: - ks=1 - k=0 - b=b+1 + ks = 1 + k = 0 + b = b + 1 break - h=h+hs - if b>3: - if hs==1: - hs=-1 - h=-1 + h = h + hs + if b > 3: + if hs == 1: + hs = -1 + h = -1 else: - hs=1 - h=0 + hs = 1 + h = 0 break peaks.sort() - self.peaks=peaks - self.limit=dsmax + self.peaks = peaks + self.limit = dsmax return peaks - def ds(self,h): - """ computes 1/d for this hkl = hgh """ - return math.sqrt(np.dot(h,np.dot(self.gi,h))) # 1/d or d* - + def ds(self, h): + """computes 1/d for this hkl = hgh""" + return math.sqrt(np.dot(h, np.dot(self.gi, h))) # 1/d or d* - def makerings(self,limit,tol=0.001): + def makerings(self, limit, tol=0.001): """ Makes a list of computed powder rings The tolerance is the difference in d* to decide if two peaks overlap """ - self.peaks=self.gethkls(limit+tol) # [ ds, [hkl] ] - self.ringds=[] # a list of floats - self.ringhkls={} # a dict of lists of integer hkl + self.peaks = self.gethkls(limit + tol) # [ ds, [hkl] ] + self.ringds = [] # a list of floats + self.ringhkls = {} # a dict of lists of integer hkl # Append first peak peak = self.peaks[0] self.ringds.append(peak[0]) @@ -342,26 +367,26 @@ def makerings(self,limit,tol=0.001): self.ringhkls[self.ringds[-1]].append(peak[1]) else: self.ringds.append(peak[0]) - self.ringhkls[self.ringds[-1]]= [peak[1]] + self.ringhkls[self.ringds[-1]] = [peak[1]] 
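+        # remember the tolerance used: getanglehkls() rebuilds its cached pair tables when ringtol changes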
         self.ringtol = tol
 
-    def anglehkls(self,h1,h2):
+    def anglehkls(self, h1, h2):
         """
         Compute the angle between reciprocal lattice vectors h1, h2
         """
-        g1 = np.dot(h1,np.dot(self.gi,h1))
-        g2 = np.dot(h2,np.dot(self.gi,h2))
-        g12= np.dot(h1,np.dot(self.gi,h2))
-        costheta = g12/math.sqrt(g1*g2)
+        g1 = np.dot(h1, np.dot(self.gi, h1))
+        g2 = np.dot(h2, np.dot(self.gi, h2))
+        g12 = np.dot(h1, np.dot(self.gi, h2))
+        costheta = g12 / math.sqrt(g1 * g2)
         try:
-            return degrees(math.acos(costheta)),costheta
+            return degrees(math.acos(costheta)), costheta
         except:
-            if abs(costheta-1) < 1e-6:
-                return 0.,1.0
-            if abs(costheta+1) < 1e-6:
-                return 180.,-1.0
+            if abs(costheta - 1) < 1e-6:
+                return 0.0, 1.0
+            if abs(costheta + 1) < 1e-6:
+                return 180.0, -1.0
             print("Error in unit cell class determining angle")
-            print("h1",h1,"h2",h2,"Costheta=",costheta)
+            print("h1", h1, "h2", h2, "Costheta=", costheta)
             raise
 
     def getanglehkls(self, ring1, ring2):
@@ -369,26 +394,29 @@ def getanglehkls(self, ring1, ring2):
         Cache the previously requested pairs of rings,
         sorted by the cosine of the angle between them
         """
-        if self.ringtol != self.anglehkl_cache['ringtol'] or \
-           (self.B != self.anglehkl_cache['B']).any():
-            self.anglehkl_cache = {'ringtol':self.ringtol,
-                                   'B':self.B,
-                                   'BI':np.linalg.inv(self.B) }
-        key = (ring1,ring2)
-        B = self.anglehkl_cache['B']
-        BI = self.anglehkl_cache['BI']
+        if (
+            self.ringtol != self.anglehkl_cache["ringtol"]
+            or (self.B != self.anglehkl_cache["B"]).any()
+        ):
+            self.anglehkl_cache = {
+                "ringtol": self.ringtol,
+                "B": self.B,
+                "BI": np.linalg.inv(self.B),
+            }
+        key = (ring1, ring2)
+        B = self.anglehkl_cache["B"]
+        BI = self.anglehkl_cache["BI"]
         if key not in self.anglehkl_cache:
             h1 = self.ringhkls[self.ringds[ring1]]
             h2 = self.ringhkls[self.ringds[ring2]]
-            cangs = cosangles_many( h1, h2, self.gi )
-            val = filter_pairs( h1, h2, cangs, B, BI )
+            cangs = cosangles_many(h1, h2, self.gi)
+            val = filter_pairs(h1, h2, cangs, B, BI)
             self.anglehkl_cache[key] = val
         else:
            val = self.anglehkl_cache[key]
         return val
 
-
-    def orient(self,ring1,g1,ring2,g2,verbose=0, crange = -1.):
+    def orient(self, ring1, g1, ring2, g2, verbose=0, crange=-1.0):
         """
         Compute an orientation matrix using cell parameters and the indexing
         of two reflections
@@ -400,75 +428,103 @@ def orient(self, ring1, g1, ring2, g2, verbose=0, crange=-1.0):
         t2 is in the plane of both (unit vector along g1x(g1xg2))
         t3 is perpendicular to both (unit vector along g1xg2)
         """
-        costheta = np.dot( g1, g2 ) / np.sqrt( (g1*g1).sum()*(g2*g2).sum() )
-        hab, c2ab, matrs = self.getanglehkls( ring1, ring2 )
+        costheta = np.dot(g1, g2) / np.sqrt((g1 * g1).sum() * (g2 * g2).sum())
+        hab, c2ab, matrs = self.getanglehkls(ring1, ring2)
         if crange > 0:
-            best = np.arange( len(c2ab), dtype=int)[ abs( c2ab - costheta ) < crange ]
+            best = np.arange(len(c2ab), dtype=int)[abs(c2ab - costheta) < crange]
             if verbose == 1:
                 print("possible best", best, len(c2ab))
         else:
-            i = np.searchsorted(c2ab, costheta, side='left' )
-            if i > 0 and (i == len(c2ab) or
-                          (fabs(costheta - c2ab[i-1]) < fabs(costheta - c2ab[i]))):
-                best = [i-1,]
+            i = np.searchsorted(c2ab, costheta, side="left")
+            if i > 0 and (
+                i == len(c2ab)
+                or (fabs(costheta - c2ab[i - 1]) < fabs(costheta - c2ab[i]))
+            ):
+                best = [
+                    i - 1,
+                ]
             else:
-                best = [i,]
-        if verbose==1:
-            print("g1, g2",g1,g2)
-            print("observed cos2theta",costheta)
-            print("hab, c2ab",hab,c2ab)
-            print("best",best)
+                best = [
+                    i,
+                ]
+        if verbose == 1:
+            print("g1, g2", g1, g2)
+            print("observed cos2theta", costheta)
+            print("hab, c2ab", hab, c2ab)
+            print("best", best)
         self.UBIlist = []
-        UBlist=[]
+        UBlist = []
         for b in best:
             h1, h2 = hab[b]
-            if verbose==1:
-                print("Assigning h1",h1,g1,self.ds(h1),\
-                      math.sqrt(np.dot(g1,g1)),\
-                      self.ds(h1)-math.sqrt(np.dot(g1,g1)))
-                print("Assigning h2",h2,g2,self.ds(h2),\
-                      math.sqrt(np.dot(g2,g2)),\
-                      self.ds(h1)-math.sqrt(np.dot(g1,g1)))
-                print("Cos angle calc",self.anglehkls(h1,h2),
-                      "obs",costheta,"c2ab",c2ab[b])
+            if verbose == 1:
+                print(
+                    "Assigning h1",
+                    h1,
+                    g1,
+                    self.ds(h1),
+                    math.sqrt(np.dot(g1, g1)),
+                    self.ds(h1) - math.sqrt(np.dot(g1, g1)),
+                )
+                print(
+                    "Assigning h2",
+                    h2,
+                    g2,
+                    self.ds(h2),
+                    math.sqrt(np.dot(g2, g2)),
+                    self.ds(h2) - math.sqrt(np.dot(g2, g2)),
+                )
+                print(
+                    "Cos angle calc",
+                    self.anglehkls(h1, h2),
+                    "obs",
+                    costheta,
+                    "c2ab",
+                    c2ab[b],
+                )
             BT = matrs[b]
-            UBI = np.empty((3,3), float)
+            UBI = np.empty((3, 3), float)
             UBI[0] = g1
             UBI[1] = g2
-            cImageD11.quickorient( UBI, BT )
-            if verbose==1:
+            cImageD11.quickorient(UBI, BT)
+            if verbose == 1:
                 print("UBI")
                 print(UBI)
-                h=np.dot(UBI,g1)
-                print("(%9.3f, %9.3f, %9.3f)"%(h[0],h[1],h[2]))
-                h=np.dot(UBI,g2)
-                print("(%9.3f, %9.3f, %9.3f)"%(h[0],h[1],h[2]))
-            self.UBI=UBI
-            self.UB=np.linalg.inv(UBI)
+                h = np.dot(UBI, g1)
+                print("(%9.3f, %9.3f, %9.3f)" % (h[0], h[1], h[2]))
+                h = np.dot(UBI, g2)
+                print("(%9.3f, %9.3f, %9.3f)" % (h[0], h[1], h[2]))
+            self.UBI = UBI
+            self.UB = np.linalg.inv(UBI)
             self.UBIlist.append(UBI)
             UBlist.append(self.UB)
         # trim to uniq list? What about small distortions...
-        self.UBIlist = ubi_equiv( self.UBIlist, UBlist )
+        self.UBIlist = ubi_equiv(self.UBIlist, UBlist)
 
-def BTmat( h1, h2, B, BI ):
-    """ used for computing orientations
-    """
-    g1 = np.dot( B, h1 ) # gvectors for these hkl
-    g2 = np.dot( B, h2 )
-    g3 = np.cross( g1, g2 )
-    u1 = unit(g1) # normalised
+
+def BTmat(h1, h2, B, BI):
    """used for computing orientations"""
+    g1 = np.dot(B, h1)  # gvectors for these hkl
+    g2 = np.dot(B, h2)
+    g3 = np.cross(g1, g2)
+    u1 = unit(g1)  # normalised
     u3 = unit(g3)
     u2 = np.cross(u1, u3)
-    BT = np.dot( BI, np.transpose((u1, u2, u3)))
+    BT = np.dot(BI, np.transpose((u1, u2, u3)))
     return BT
 
-HKL0 = np.array( [ [0,0,1,1,-1,1,-1,0, 0, 1, -1, 1, 1, 3, 11],
-                   [0,1,0,1, 1,0, 0,1,-1, 1,  1,-1, 1, 2, 12],
-                   [1,0,0,0, 0,1, 1,1, 1, 1,  1, 1,-1, 1, 13] ], float ) # first unit cell
+
+HKL0 = np.array(
+    [
+        [0, 0, 1, 1, -1, 1, -1, 0, 0, 1, -1, 1, 1, 3, 11],
+        [0, 1, 0, 1, 1, 0, 0, 1, -1, 1, 1, -1, 1, 2, 12],
+        [1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, -1, 1, 13],
+    ],
+    float,
+)  # first unit cell
 
 
-def filter_pairs( h1, h2, c2a, B, BI, tol = 1e-5):
-    """ remove duplicate pairs for orientation searches
+def filter_pairs(h1, h2, c2a, B, BI, tol=1e-5):
    """remove duplicate pairs for orientation searches
     h1 = reflections of ring1, N1 peaks
     h2 = reflections of ring2, N2 peaks
     c2a = cos angle between them, N1xN2
     B = B matrix in reciprocal space
     BI = inverse in real space
     """
     assert c2a.shape == (len(h1), len(h2))
-    order = np.argsort( c2a.ravel() ) # increasing in cosine of angle
+    order = np.argsort(c2a.ravel())  # increasing in cosine of angle
     c2as = c2a.flat[order]
-    hi, hj= np.mgrid[0:len(h1),0:len(h2)]
+    hi, hj = np.mgrid[0 : len(h1), 0 : len(h2)]
     hi = hi.ravel()[order]  # to get back the peaks
     hj = hj.ravel()[order]
     # Results holders:
-    pairs = [ ]
-    cangs = [ ]
-    matrs = [ ]
+    pairs = []
+    cangs = []
+    matrs = []
     # cluster the cangs assuming a sensible threshold
     dc = (c2as[1:] - c2as[:-1]) > 1e-8  # differences
-    inds = list(np.arange( 1, len(dc) + 1, dtype=int )[dc]) + 
[len(c2as)-1,] - p = 0 # previous + inds = list(np.arange(1, len(dc) + 1, dtype=int)[dc]) + [ + len(c2as) - 1, + ] + p = 0 # previous for i in inds: - c = c2as[p:i] # block is p:i - if abs(c2as[p]) < 0.98: # always keep the first one + c = c2as[p:i] # block is p:i + if abs(c2as[p]) < 0.98: # always keep the first one ha = h1[hi[p]] hb = h2[hj[p]] - pairs.append( (ha, hb) ) - cangs.append( c2as[p] ) - BT = BTmat( ha, hb, B, BI) - matrs.append( BT ) + pairs.append((ha, hb)) + cangs.append(c2as[p]) + BT = BTmat(ha, hb, B, BI) + matrs.append(BT) else: p = i continue if len(c) == 1: p = i continue - assert (c.max()-c.min()) < 2.1e-8, "Angles blocking error in filter_pairs" + assert (c.max() - c.min()) < 2.1e-8, "Angles blocking error in filter_pairs" # here we have a bunch of hkl pairs which all give the same angle # between them. They are not all the same. We generate a pair of peaks # from the first one and see which other pairs index differently - ga = np.dot(B, ha ) - gb = np.dot(B, hb ) - assert abs( np.dot(ga,gb)/np.sqrt(np.dot(ga,ga)*np.dot(gb,gb)) - c2as[p] ) < 2e-8, "mixup in filter_pairs" - gobs = np.array( (ga, gb, (0,0,0)), float) + ga = np.dot(B, ha) + gb = np.dot(B, hb) + assert ( + abs(np.dot(ga, gb) / np.sqrt(np.dot(ga, ga) * np.dot(gb, gb)) - c2as[p]) + < 2e-8 + ), "mixup in filter_pairs" + gobs = np.array((ga, gb, (0, 0, 0)), float) UBI = gobs.copy() - cImageD11.quickorient( UBI, BT ) - gtest = [ np.dot( np.linalg.inv(UBI), HKL0 ).T.copy(), ] - for j in range(p+1,i): + cImageD11.quickorient(UBI, BT) + gtest = [ + np.dot(np.linalg.inv(UBI), HKL0).T.copy(), + ] + for j in range(p + 1, i): ha = h1[hi[j]] hb = h2[hj[j]] - BT = BTmat( ha, hb, B, BI) + BT = BTmat(ha, hb, B, BI) newpair = True for gt in gtest: UBI = gobs.copy() - cImageD11.quickorient( UBI, BT ) + cImageD11.quickorient(UBI, BT) npk = cImageD11.score(UBI, gt, 1e-6) if npk == len(HKL0[0]): newpair = False break if newpair: - pairs.append( (ha, hb) ) - cangs.append( c2as[j] ) - matrs.append( BT ) - gtest.append( np.dot( np.linalg.inv(UBI), HKL0 ).T.copy() ) + pairs.append((ha, hb)) + cangs.append(c2as[j]) + matrs.append(BT) + gtest.append(np.dot(np.linalg.inv(UBI), HKL0).T.copy()) p = i return pairs, cangs, matrs -def ubi_equiv( ubilist, ublist, tol=1e-8): - """ Two ubi are considered equivalent if they both index the peaks + +def ubi_equiv(ubilist, ublist, tol=1e-8): + """Two ubi are considered equivalent if they both index the peaks in the HKL0 array exactly""" if len(ubilist) < 2: return ubilist - order = np.argsort( [np.trace( ubi ) for ubi in ubilist ] ) # low to high - uniq = [ ubilist[ order[-1] ], ] - refgv = [ np.dot( ublist[ order[-1]] , HKL0 ), ] + order = np.argsort([np.trace(ubi) for ubi in ubilist]) # low to high + uniq = [ + ubilist[order[-1]], + ] + refgv = [ + np.dot(ublist[order[-1]], HKL0), + ] for i in order[:-1][::-1]: ubi = ubilist[i] score = 1 - for pks in refgv: # pks is (n,3) for an orientation - hcalc = np.dot( ubi, pks ) - score = min(score, np.abs( np.round( hcalc ) - hcalc).sum() ) - if score <= tol: # matches a previous grain + for pks in refgv: # pks is (n,3) for an orientation + hcalc = np.dot(ubi, pks) + score = min(score, np.abs(np.round(hcalc) - hcalc).sum()) + if score <= tol: # matches a previous grain break - if score > tol: # is a new orientation - uniq.append( ubi ) - refgv.append( np.dot( np.linalg.inv( ubi ) , HKL0 ) ) + if score > tol: # is a new orientation + uniq.append(ubi) + refgv.append(np.dot(np.linalg.inv(ubi), HKL0)) return uniq def unitcell_from_parameters(pars): 
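+    """Build a unitcell from a parameter object holding cell__a ... cell_gamma
+    and the cell_lattice_[P,A,B,C,I,F,R] centring type."""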
parnames = "_a _b _c alpha beta gamma".split() - cell = unitcell([pars.get("cell_%s"%(s)) for s in parnames], - pars.get("cell_lattice_[P,A,B,C,I,F,R]")) + cell = unitcell( + [pars.get("cell_%s" % (s)) for s in parnames], + pars.get("cell_lattice_[P,A,B,C,I,F,R]"), + ) return cell -if __name__=="__main__": - import sys,time - start=time.time() - cell = unitcell([float(x) for x in sys.argv[1:7]],sys.argv[7]) + +if __name__ == "__main__": + import sys, time + + start = time.time() + cell = unitcell([float(x) for x in sys.argv[1:7]], sys.argv[7]) cell.makerings(2) - cell.getanglehkls(11,12) + cell.getanglehkls(11, 12) diff --git a/ImageD11/weighted_kde.py b/ImageD11/weighted_kde.py index 214541c7..40a2c81a 100644 --- a/ImageD11/weighted_kde.py +++ b/ImageD11/weighted_kde.py @@ -12,6 +12,7 @@ from scipy.spatial.distance import cdist from six import string_types + class gaussian_kde(object): """Representation of a kernel-density estimate using Gaussian kernels. @@ -148,20 +149,21 @@ class gaussian_kde(object): >>> plt.show() """ + def __init__(self, dataset, bw_method=None, weights=None): self.dataset = np.atleast_2d(dataset) if not self.dataset.size > 1: raise ValueError("`dataset` input should have multiple elements.") self.d, self.n = self.dataset.shape - + if weights is not None: self.weights = weights / np.sum(weights) else: self.weights = np.ones(self.n) / self.n - - # Compute the effective sample size + + # Compute the effective sample size # http://surveyanalysis.org/wiki/Design_Effects_and_Effective_Sample_Size#Kish.27s_approximate_formula_for_computing_effective_sample_size - self.neff = 1.0 / np.sum(self.weights ** 2) + self.neff = 1.0 / np.sum(self.weights**2) self.set_bandwidth(bw_method=bw_method) @@ -194,24 +196,23 @@ def evaluate(self, points): points = np.reshape(points, (self.d, 1)) m = 1 else: - msg = "points have dimension %s, dataset has dimension %s" % (d, - self.d) + msg = "points have dimension %s, dataset has dimension %s" % (d, self.d) raise ValueError(msg) # compute the normalised residuals - chi2 = cdist(points.T, self.dataset.T, 'mahalanobis', VI=self.inv_cov) ** 2 + chi2 = cdist(points.T, self.dataset.T, "mahalanobis", VI=self.inv_cov) ** 2 # compute the pdf - result = np.sum(np.exp(-.5 * chi2) * self.weights, axis=1) / self._norm_factor + result = np.sum(np.exp(-0.5 * chi2) * self.weights, axis=1) / self._norm_factor return result __call__ = evaluate def scotts_factor(self): - return np.power(self.neff, -1./(self.d+4)) + return np.power(self.neff, -1.0 / (self.d + 4)) def silverman_factor(self): - return np.power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4)) + return np.power(self.neff * (self.d + 2.0) / 4.0, -1.0 / (self.d + 4)) # Default method to calculate bandwidth, can be overwritten by subclass covariance_factor = scotts_factor @@ -260,19 +261,20 @@ def set_bandwidth(self, bw_method=None): """ if bw_method is None: pass - elif bw_method == 'scott': + elif bw_method == "scott": self.covariance_factor = self.scotts_factor - elif bw_method == 'silverman': + elif bw_method == "silverman": self.covariance_factor = self.silverman_factor elif np.isscalar(bw_method) and not isinstance(bw_method, string_types): - self._bw_method = 'use constant' + self._bw_method = "use constant" self.covariance_factor = lambda: bw_method elif callable(bw_method): self._bw_method = bw_method self.covariance_factor = lambda: self._bw_method(self) else: - msg = "`bw_method` should be 'scott', 'silverman', a scalar " \ - "or a callable." 
+ msg = ( + "`bw_method` should be 'scott', 'silverman', a scalar " "or a callable." + ) raise ValueError(msg) self._compute_covariance() @@ -283,16 +285,20 @@ def _compute_covariance(self): """ self.factor = self.covariance_factor() # Cache covariance and inverse covariance of the data - if not hasattr(self, '_data_inv_cov'): + if not hasattr(self, "_data_inv_cov"): # Compute the mean and residuals _mean = np.sum(self.weights * self.dataset, axis=1) - _residual = (self.dataset - _mean[:, None]) + _residual = self.dataset - _mean[:, None] # Compute the biased covariance - self._data_covariance = np.atleast_2d(np.dot(_residual * self.weights, _residual.T)) + self._data_covariance = np.atleast_2d( + np.dot(_residual * self.weights, _residual.T) + ) # Correct for bias (http://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_covariance) - self._data_covariance /= (1 - np.sum(self.weights ** 2)) + self._data_covariance /= 1 - np.sum(self.weights**2) self._data_inv_cov = np.linalg.inv(self._data_covariance) self.covariance = self._data_covariance * self.factor**2 self.inv_cov = self._data_inv_cov / self.factor**2 - self._norm_factor = np.sqrt(np.linalg.det(2*np.pi*self.covariance)) #* self.n \ No newline at end of file + self._norm_factor = np.sqrt( + np.linalg.det(2 * np.pi * self.covariance) + ) # * self.n diff --git a/ImageD11/write_graindex_gv.py b/ImageD11/write_graindex_gv.py index 2b749dad..735ce729 100644 --- a/ImageD11/write_graindex_gv.py +++ b/ImageD11/write_graindex_gv.py @@ -1,6 +1,8 @@ - from __future__ import print_function +import numpy as np +from math import sqrt, pi + # ImageD11_v0.4 Software for beamline ID11 # Copyright (C) 2005 Jon Wright and Soren Schmidt # @@ -24,22 +26,19 @@ which is suitable for input into graindex """ -import numpy as np - -from math import sqrt, pi -def make_ds_list(cell,limit=2.): +def make_ds_list(cell, limit=2.0): """ Generates a list of d-spacings """ print("Generating hkls with unit cell:", cell) cell.makerings(limit) - ds_list=[] - keys = list(cell.ringhkls.keys()) # d-spacings + ds_list = [] + keys = list(cell.ringhkls.keys()) # d-spacings keys.sort() - ptype=0 + ptype = 0 for ky in keys: - ptype=ptype+1 + ptype = ptype + 1 ds = ky hklmax = -1e10 hmax = -1e10 @@ -48,21 +47,22 @@ def make_ds_list(cell,limit=2.): hkls_in_ring.sort() hkls_in_ring.reverse() print(hkls_in_ring) - for h,k,l in hkls_in_ring: - if h+k+l >= hklmax and h >= hmax and k>kmax: - hklmax = h+k+l + for h, k, l in hkls_in_ring: + if h + k + l >= hklmax and h >= hmax and k > kmax: + hklmax = h + k + l hmax = h kmax = k - ds_string="(%d%d%d)"%(h,k,l) - ds_list.append([ds,ptype,ds_string]) + ds_string = "(%d%d%d)" % (h, k, l) + ds_list.append([ds, ptype, ds_string]) return ds_list -def get_ds_string(g,ds_list): + +def get_ds_string(g, ds_list): """ Attempt to emulate the graindex (hkl) syntax. Obviously you would not want a (10,0,0) peak! 
""" - length = sqrt(g[0]*g[0]+g[1]*g[1]+g[2]*g[2]) + length = sqrt(g[0] * g[0] + g[1] * g[1] + g[2] * g[2]) # this is 1/d min_diff = abs(length - ds_list[0][0]) ptype = ds_list[0][1] @@ -70,36 +70,39 @@ def get_ds_string(g,ds_list): for item in ds_list[1:]: diff = abs(length - item[0]) if diff < min_diff: - min_diff=diff + min_diff = diff ptype = item[1] ds_string = item[2] return ds_string, ptype -def write_graindex_gv(outfilename,gv,tth,eta,omega,intensity,unitcell): +def write_graindex_gv(outfilename, gv, tth, eta, omega, intensity, unitcell): """ call with array of gvectors, tth, eta and omega vals Using an indexing object to get the ring assignments """ - outputfile = open(outfilename,"w") - ds_list=make_ds_list(unitcell) # + outputfile = open(outfilename, "w") + ds_list = make_ds_list(unitcell) # print(ds_list) order = np.argsort(tth) - nr=0 + nr = 0 for i in order: - nr=nr+1 - ds_string,ptype = get_ds_string(gv[:,i],ds_list) - outputfile.write("%i %f %f %f %s %i 0 0 0 %f %f %f 0 0 1 %.2f\n"%( - nr, # line number - omega[i], # Omega angle - eta[i], # eta angle - tth[i], # two_theta angle - ds_string, # hkl in format (111) or (222) etc - ptype, # assignment to hkl peaks - gv[0,i]*2*pi, # The g-vector - gv[1,i]*2*pi, - gv[2,i]*2*pi, - intensity[i] - )) + nr = nr + 1 + ds_string, ptype = get_ds_string(gv[:, i], ds_list) + outputfile.write( + "%i %f %f %f %s %i 0 0 0 %f %f %f 0 0 1 %.2f\n" + % ( + nr, # line number + omega[i], # Omega angle + eta[i], # eta angle + tth[i], # two_theta angle + ds_string, # hkl in format (111) or (222) etc + ptype, # assignment to hkl peaks + gv[0, i] * 2 * pi, # The g-vector + gv[1, i] * 2 * pi, + gv[2, i] * 2 * pi, + intensity[i], + ) + ) outputfile.close() diff --git a/scripts/ImageD11_gui.py b/scripts/ImageD11_gui.py index 806a9537..2d5a0478 100644 --- a/scripts/ImageD11_gui.py +++ b/scripts/ImageD11_gui.py @@ -43,7 +43,6 @@ import logging import sys -import os # GuiMaker is for building up the windows etc diff --git a/scripts/filtergrain.py b/scripts/filtergrain.py index ffe8cb1c..14a55d28 100644 --- a/scripts/filtergrain.py +++ b/scripts/filtergrain.py @@ -11,7 +11,6 @@ import sys, logging -import numpy as np from argparse import ArgumentParser from ImageD11 import refinegrains, indexing, grain, ImageD11options diff --git a/scripts/fitgrain.py b/scripts/fitgrain.py index db68b440..5dddffbe 100644 --- a/scripts/fitgrain.py +++ b/scripts/fitgrain.py @@ -9,7 +9,7 @@ are not a problem). 
""" from argparse import ArgumentParser -from ImageD11 import refinegrains, indexing, ImageD11options +from ImageD11 import refinegrains, ImageD11options import logging, sys def fitgrain(options): diff --git a/scripts/fix_spline.py b/scripts/fix_spline.py index 80e8c6ed..229830f7 100644 --- a/scripts/fix_spline.py +++ b/scripts/fix_spline.py @@ -52,8 +52,8 @@ def mymain(): sys.exit() outname = sys.argv[2] if os.path.exists(outname): - if not input("Sure you want to overwrite %s ?"%(outname) - )[0] in ['y','Y']: + if input("Sure you want to overwrite %s ?"%(outname) + )[0] not in ['y','Y']: sys.exit() splinename = sys.argv[3] if splinename == 'perfect': diff --git a/scripts/id11_summarize.py b/scripts/id11_summarize.py index 5d676511..387449b2 100644 --- a/scripts/id11_summarize.py +++ b/scripts/id11_summarize.py @@ -15,7 +15,6 @@ def getnumerics(d): pass return ret -import sys class headerfollower: diff --git a/scripts/makemap.py b/scripts/makemap.py index 5c0b58ed..dca29a87 100755 --- a/scripts/makemap.py +++ b/scripts/makemap.py @@ -3,11 +3,9 @@ from __future__ import print_function -from ImageD11.indexing import readubis, write_ubi_file from ImageD11.refinegrains import refinegrains import ImageD11.refinegrains -from ImageD11 import ImageD11options -import sys, os, argparse +import sys, argparse def makemap(options): diff --git a/scripts/merge_flt.py b/scripts/merge_flt.py index 457f827a..09a69694 100644 --- a/scripts/merge_flt.py +++ b/scripts/merge_flt.py @@ -21,7 +21,7 @@ from ImageD11 import transformer from ImageD11.columnfile import newcolumnfile, columnfile import numpy -import sys, time +import sys try: pars = sys.argv[1] diff --git a/scripts/peaksearch.py b/scripts/peaksearch.py index f4b9afd8..3f083040 100644 --- a/scripts/peaksearch.py +++ b/scripts/peaksearch.py @@ -36,8 +36,6 @@ Defines one function (peaksearch) which might be reused """ -import h5py -import hdf5plugin # first! import time # For benchmarking diff --git a/scripts/plotlayer.py b/scripts/plotlayer.py index 7be5592b..256fc63f 100644 --- a/scripts/plotlayer.py +++ b/scripts/plotlayer.py @@ -23,7 +23,6 @@ def make_ellipses( grains , scalet = 1.0, scaler=1.0 ): # Sort by volume to plot biggest first (at back) tmp = list(zip( vol, grains)) tmp.sort() - from ImageD11.indexing import ubitoU for vol, g in tmp[::-1]: size = pow(vol, 1.0/3.0) el.append( (Ellipse( xy = g.translation[0:2], @@ -121,7 +120,7 @@ def plubi(uf, first = False , fig=None): if __name__=="__main__": - import sys, os, glob, time + import sys diff --git a/scripts/spatialfix.py b/scripts/spatialfix.py index 1e836d3a..2f58d039 100644 --- a/scripts/spatialfix.py +++ b/scripts/spatialfix.py @@ -8,7 +8,6 @@ from ImageD11 import columnfile import fabio import sys, os -import numpy as np def spatialfix( cf, dx, dy, flip=""): if "V" in flip: