diff --git a/sandbox/jadb/test_refine.py b/sandbox/jadb/test_refine.py
index 48546b14..167eb65a 100755
--- a/sandbox/jadb/test_refine.py
+++ b/sandbox/jadb/test_refine.py
@@ -23,6 +23,7 @@
 from ImageD11 import sym_u
 from ImageD11 import transform as transformid11
 from ImageD11.sinograms.point_by_point import PBPMap, PBPRefine
+from ImageD11.sinograms.tensor_map import unitcell_to_b
 
 import xfab
@@ -44,6 +45,7 @@ def get_point_data(params, xi0, yi0, peak_dict):
     y = xi0*peak_dict['sinomega'] + yi0*peak_dict['cosomega']
     ydist = np.abs( y + peak_dict['dty'] )
     m = ydist <= params['TRANSLATION_STAGE_DY_STEP']
+    # print('Voxel mask got', m.sum(), 'peaks')
     # print(np.arange(len(peak_dict['dty']))[m])
     return xpos, m, ydist
@@ -79,6 +81,9 @@ def refine_point_axel(xi0, yi0, ubis, params, peak_dict, lattice):
     # get the masked peaks for this voxel
     _, local_mask, ydist = get_point_data(params, xi0, yi0, peak_dict)
 
+    print('First local mask results...')
+    print(peak_dict['sinomega'][local_mask].sum())
+
     # compute g-vectors at this voxel
     gve = get_gve(params, local_mask, peak_dict['xpos_refined'], peak_dict)
@@ -100,9 +105,16 @@ def refine_point_axel(xi0, yi0, ubis, params, peak_dict, lattice):
         # which peaks at this voxel does this grain like?
         g.mask = g.m = (li == i)
+
+        print('Assignment mask results...')
+        print(peak_dict['sinomega'][local_mask][g.mask].sum())
+
+        # print('Assigned', g.mask.sum(), 'peaks first time')
         # how many peaks does this grain have?
         g.npks = np.sum(g.mask)
+        print('Assigned', g.npks, 'peaks first time out of', local_mask.sum())
+
         hkl_double = np.dot( g.ubi, gve[:, g.mask] )
         g.hkl = np.round ( hkl_double ).astype( int )
         g.etasign = np.sign( peak_dict['eta'][ local_mask ][ g.m ] )
@@ -113,8 +125,13 @@ def refine_point_axel(xi0, yi0, ubis, params, peak_dict, lattice):
         peaktags = np.vstack( (g.hkl, g.etasign, peak_dict['iy'][ local_mask ][ g.m ]) )
         unitags, labels = np.unique(peaktags, axis=1, return_inverse=True )
+        print(labels[:10])
+        print('Got', unitags.shape[1], 'unique peaks during merging')
+        print(unitags[:, 0].sum())
         wI = peak_dict['sum_intensity'][ local_mask ][g.m]
         sI = np.bincount( labels, weights=wI )
+        print('wI sum:', wI.sum())
+        print('sI sum:', sI.sum())
         merged['sum_intensity']=sI
         sc = peak_dict['sc'][ local_mask ][g.m]
         fc = peak_dict['fc'][ local_mask ][g.m]
@@ -129,15 +146,30 @@ def refine_point_axel(xi0, yi0, ubis, params, peak_dict, lattice):
         for k in merged.keys():
             merged[k] = np.array(merged[k])
-
+
+        print('merged_sc sum:', merged['sc'].sum())
+
         merged['sinomega'] = np.sin( np.radians(merged['omega']) )
         merged['cosomega'] = np.cos( np.radians(merged['omega']) )
 
-        # recompute the peaks this grains likes with merged data
+        print('merged_sc has shape', merged['sc'].shape)
+
+        print('Merged', len(sc), 'peaks to', len(merged['sc']), 'peaks')
+
+        print('Merged peak data for masking...')
+        print('merged so sum', merged['sinomega'].sum())
+        print('merged co sum', merged['cosomega'].sum())
+        print('merged dty sum', merged['dty'].sum())
+
+        # recompute the peaks this grain likes with merged data
        _, local_mask_of_grain, ydist = get_point_data(params, xi0, yi0, merged)
+        print('Got', local_mask_of_grain.sum(), 'masked merged peaks')
+        print('ydist sum before merged masking', ydist.sum())
 
         # recompute gvector of grain with merged data
         gve_grain = get_gve(params, local_mask_of_grain, merged['xpos_refined'], merged)
 
+        print('gve_grain sum before merged assignment', gve_grain.sum())
+
         # reassign this grain with merged peaks data
         labels = np.zeros( gve_grain.shape[1],'i')-1
         drlv2 = np.ones( gve_grain.shape[1], 'd')
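# A standalone sketch (not part of the patch) of the merge step above: peaks
# sharing a (h, k, l, etasign, iy) tag are labelled with np.unique, then
# np.bincount takes intensity-weighted means per label. All numbers synthetic.
import numpy as np

tags = np.array([[1, 1, 2, 1],     # h
                 [0, 0, 1, 0],     # k
                 [0, 0, 0, 0],     # l
                 [1, 1, -1, 1],    # etasign
                 [5, 5, 7, 5]])    # iy: columns 0, 1 and 3 carry the same tag
intensity = np.array([10., 30., 5., 60.])
sc = np.array([100., 102., 50., 101.])

unitags, labels = np.unique(tags, axis=1, return_inverse=True)
sI = np.bincount(labels, weights=intensity)                    # summed intensity per merged peak
merged_sc = np.bincount(labels, weights=sc * intensity) / sI   # intensity-weighted centroid
# sI -> [100., 5.]; merged_sc[0] -> (10*100 + 30*102 + 60*101)/100 = 101.2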
@@ -145,14 +177,23 @@
         li = np.round(labels).astype(int).copy()
         g.mask = g.m = (li == i)
         g.npks = np.sum(g.m)
+        print('Assigned', g.npks, 'merged peaks out of', local_mask_of_grain.sum())
         g.gve = gve_grain[:,g.mask]
         hkl_double = np.dot( g.ubi, gve_grain[:, g.mask] )
         g.hkl = np.round ( hkl_double ).astype( int )
         g.ydist = ydist[local_mask_of_grain][g.mask]
+        # print(g.npks, 'merged peaks assigned after merging')
+
         # if grain has more than 6 peaks assigned
         if np.sum(g.mask) > 6:
-
+
+
+            print('Going into refinement...')
+            print('ydist sum', g.ydist.sum())
+            print('gve sum', g.gve.sum())
+            print('hkl sum', g.hkl.sum())
+
             # a.T @ gve = h => gve.T @ a = h.T => a = np.linalg.pinv(gve.T) @ h.T, same for b and c
             w = (1. / (g.ydist + 1) ).reshape(g.gve.shape[1], 1)
             ubifitT, residuals, rank, sing_vals = np.linalg.lstsq( w * g.gve.T, w * g.hkl.T, rcond=None )
@@ -206,19 +247,30 @@ def refine_point_axel(xi0, yi0, ubis, params, peak_dict, lattice):
             ####################################################################################################
             # And now fit a strain tensor decoupling strain and orientation for precision.
+
+            print('Strain mask fit sum', g.mask.sum())
+
             g.gve = gve_grain[:,g.mask]
             hkl_double = np.dot( g.ubi, gve_grain[:, g.mask] )
             g.hkl = np.round ( hkl_double ).astype( int )
             g.ydist = ydist[local_mask_of_grain][g.mask]
+
+            print('gve_grain_strainfit sum:', g.gve.sum())
+            print('ydist strainfit sum:', g.ydist.sum())
 
             dzero_cell = [params['cell_'+key] for key in ('_a','_b','_c','alpha','beta','gamma')]
             B0 = xfab.tools.form_b_mat(dzero_cell) / (np.pi*2)
+            print('B0')
+            print(B0)
 
             g.gve0 = g.u @ B0 @ g.hkl
             gTg0 = np.sum(g.gve*g.gve0, axis=0)
             gTg = np.sum(g.gve*g.gve, axis=0)
             g.directional_strain = (gTg0/gTg) - 1
+
+            print('Directional strain')
+            print(g.directional_strain)
 
             kappa = g.gve / np.linalg.norm(g.gve, axis=0)
             kx, ky, kz = kappa
@@ -239,6 +291,7 @@ def refine_point_axel(xi0, yi0, ubis, params, peak_dict, lattice):
             eps_vec = np.linalg.lstsq( w * M, w.flatten() * g.directional_strain, rcond=None )[0].T
             sxx, syy, szz, sxy, sxz, syz = eps_vec
             g.eps_tensor = np.array([[sxx, sxy, sxz],[sxy, syy, syz],[sxz, syz, szz]])
+            print(g.eps_tensor)
         except:
             pass
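# A quick check (not part of the patch) of the weighted fit above: rows of UBI
# map g-vectors to integer hkl, so solving gve.T @ x = hkl.T recovers x = UBI.T.
# The cell, hkl set and ydist weights below are all synthetic.
import numpy as np

rng = np.random.default_rng(0)
ubi_true = np.diag([4.04, 4.04, 4.04])       # cubic cell, a = 4.04 Angstrom
hkl = rng.integers(-3, 4, size=(3, 20)).astype(float)
gve = np.linalg.inv(ubi_true) @ hkl          # noiseless g-vectors: UBI @ gve = hkl
ydist = rng.random(20)                       # stand-in for peak-to-voxel distances

w = (1. / (ydist + 1)).reshape(-1, 1)        # same weighting as the patch uses
ubifitT, residuals, rank, sing_vals = np.linalg.lstsq(w * gve.T, w * hkl.T, rcond=None)
assert np.allclose(ubifitT.T, ubi_true)      # exact data -> exact recovery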
@@ -550,35 +603,64 @@ def score_and_assign(ubi, gvecs, tol, label):
 @numba.njit
 def count_unique_peaks(hkl, etasign, dtyi):
-    uniques = np.empty((5, etasign.shape[0]), dtype=etasign.dtype)
+
+    # Combine the input arrays into one 2D array for easier sorting
+    combined = np.empty((5, etasign.shape[0]), dtype=dtyi.dtype)
+    combined[0:3, :] = hkl
+    combined[3, :] = etasign
+    combined[4, :] = dtyi
+
+    # Create an array of indices to track the original order
+    indices = np.arange(combined.shape[1])
+
+    # Custom insertion sort to sort lexicographically
+    def lexicographical_sort(arr, inds):
+        for i in range(1, arr.shape[1]):
+            key = arr[:, i].copy()  # Copy the ith column
+            key_index = inds[i]  # Get the corresponding index
+            j = i - 1
+            # Lexicographical comparison: compare all elements in the column
+            while j >= 0 and (arr[0, j] > key[0] or
+                              (arr[0, j] == key[0] and arr[1, j] > key[1]) or
+                              (arr[0, j] == key[0] and arr[1, j] == key[1] and arr[2, j] > key[2]) or
+                              (arr[0, j] == key[0] and arr[1, j] == key[1] and arr[2, j] == key[2] and arr[3, j] > key[3]) or
+                              (arr[0, j] == key[0] and arr[1, j] == key[1] and arr[2, j] == key[2] and arr[3, j] == key[3] and arr[4, j] > key[4])):
+                arr[:, j + 1] = arr[:, j]
+                inds[j + 1] = inds[j]  # Shift the indices accordingly
+                j -= 1
+            arr[:, j + 1] = key
+            inds[j + 1] = key_index
+
+    # Sort the combined array lexicographically
+    lexicographical_sort(combined, indices)
+
+    uniques = np.empty((5, etasign.shape[0]), dtype=dtyi.dtype)
     inverses = np.empty((etasign.shape[0]), dtype=np.int64)
     unique_ptr = 0
-    # iterate through the input arrays
-    for idx in np.arange(etasign.shape[0], dtype=np.int64):
-        h, k, l = hkl[:,idx]
-        this_etasign = etasign[idx]
-        this_dtyi = dtyi[idx]
+
+    # iterate through the sorted input array
+    for idx_sorted in np.arange(combined.shape[1], dtype=np.int64):
+        h, k, l, this_etasign, this_dtyi = combined[:, idx_sorted]
+        # go from the index of the sorted array back to the index of the original array
+        idx_combined = indices[idx_sorted]
 
-        # iterate through the unique array
-        seen_array = False
-        for idx_unique in np.arange(uniques.shape[0], dtype=np.int64):
+        # did we see this row in the unique array yet?
+        seen_array = False
+        # iterate through the unique array
+        for idx_unique in np.arange(unique_ptr, dtype=np.int64):
+            # get row in the unique array
             h_uniq, k_uniq, l_uniq, etasign_uniq, dtyi_uniq = uniques[:, idx_unique]
             if (h == h_uniq) & (k == k_uniq) & (l == l_uniq) & (this_etasign == etasign_uniq) & (this_dtyi == dtyi_uniq):
-                # saw it already!:
-                inverses[idx] = idx_unique
+                # saw it already!
+                inverses[idx_combined] = idx_unique
                 seen_array = True
+                break
+
         if not seen_array:
             # got to the end of uniques and didn't see it
-            # uniques[:, unique_ptr] = [h, k, l, this_etasign, this_dtyi]
-            uniques[0, unique_ptr] = h
-            uniques[1, unique_ptr] = k
-            uniques[2, unique_ptr] = l
-            uniques[3, unique_ptr] = this_etasign
-            uniques[4, unique_ptr] = this_dtyi
-
-            inverses[idx] = unique_ptr
+            uniques[:, unique_ptr] = [h, k, l, this_etasign, this_dtyi]
+            inverses[idx_combined] = unique_ptr
             unique_ptr += 1
-
     return uniques[:,:unique_ptr], inverses
@@ -586,8 +668,13 @@ def count_unique_peaks(hkl, etasign, dtyi):
 @numba.njit
 def merge(hkl, etasign, dtyi, sum_intensity, sc, fc, omega, dty, xpos_refined):
     unitags, labels = count_unique_peaks(hkl, etasign, dtyi)
+    print('Got', unitags.shape[1], 'unique peaks during merging')
+    print(unitags[:, 0].sum())
+    print(labels[:10])
     wI = sum_intensity
     sI = np.bincount( labels, weights=wI )
+    print('wI sum:', wI.sum())
+    print('sI sum:', sI.sum())
     merged_sum_intensity = sI
     merged_sc = np.bincount( labels, weights=sc*wI )/sI
@@ -595,6 +682,9 @@
     merged_omega = np.bincount( labels, weights=omega*wI )/sI
     merged_dty = np.bincount( labels, weights=dty*wI )/sI
     merged_xpos_refined = np.bincount( labels, weights=xpos_refined*wI )/sI
+
+    print('merged_sc has shape', merged_sc.shape)
+    print('merged_sc sum:', merged_sc.sum())
 
     return merged_sum_intensity, merged_sc, merged_fc, merged_omega, merged_dty, merged_xpos_refined
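# A correctness check (not part of the patch) for the rewritten
# count_unique_peaks: it scans columns in lexicographic order, so its uniques
# and inverse labels should agree with np.unique(..., axis=1,
# return_inverse=True), which the pure-numpy path above relies on.
import numpy as np

rng = np.random.default_rng(1)
hkl = rng.integers(-2, 3, size=(3, 50)).astype(np.int64)
etasign = rng.choice(np.array([-1, 1], dtype=np.int64), size=50)
dtyi = rng.integers(0, 4, size=50)

uni_np, inv_np = np.unique(np.vstack((hkl, etasign, dtyi)), axis=1, return_inverse=True)
uni_nb, inv_nb = count_unique_peaks(hkl, etasign, dtyi)
assert np.array_equal(uni_np, uni_nb) and np.array_equal(inv_np, inv_nb)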
@@ -604,13 +694,84 @@ def get_voxel_mask(y0, xi0, yi0, sinomega, cosomega, dty, ystep):
     dty_calc = y0 - xi0 * sinomega - yi0 * cosomega
     ydist = np.abs(dty_calc - dty)
     m = ydist <= ystep
+    # print('Voxel mask got', m.sum(), 'peaks')
     return m, ydist
 
 
+@numba.njit
+def compute_gve(sc, fc, omega, xpos,
+                distance, y_center, y_size, tilt_y, z_center, z_size, tilt_z, tilt_x,
+                o11, o12, o21, o22,
+                t_x, t_y, t_z, wedge, chi, wavelength):
+
+    # compute g-vectors at this voxel
+    this_distance = distance - xpos
+
+    tth, eta = compute_tth_eta(sc, fc,
+                               y_center=y_center,
+                               y_size=y_size,
+                               tilt_y=tilt_y,
+                               z_center=z_center,
+                               z_size=z_size,
+                               tilt_z=tilt_z,
+                               tilt_x=tilt_x,
+                               distance=this_distance,
+                               o11=o11,
+                               o12=o12,
+                               o21=o21,
+                               o22=o22,
+                               t_x=t_x,
+                               t_y=t_y,
+                               t_z=t_z,
+                               wedge=wedge,
+                               chi=chi
+                               )
+
+    gve = compute_g_vectors(tth, eta,
+                            omega,
+                            wavelength,
+                            wedge=wedge,
+                            chi=chi)
+
+    return gve
+
+
+@numba.njit
+def weighted_lstsq_ubi_fit(ydist, gve, hkl):
+    # run the weighted fit
+    # a.T @ gve = h => gve.T @ a = h.T => a = np.linalg.pinv(gve.T) @ h.T, same for b and c
+    w = (1. / (ydist + 1) ).reshape(gve.shape[1], 1)
+    a = w * gve.T
+    b = w * hkl.T
+    m, n = a.shape[-2:]
+    rcond = np.finfo(b.dtype).eps * max(n, m)
+    ubifitT, residuals, rank, sing_vals = np.linalg.lstsq(a, b, rcond=rcond)
+    ubifit = ubifitT.T
+
+    return w, ubifit, residuals, rank, sing_vals
+
+
+@numba.njit
+def gve_norm(gve):
+    norms = np.zeros(gve.shape[1])
+    for i in range(gve.shape[1]):
+        gv = gve[:,i]
+        norms[i] = np.sqrt(np.sum(np.power(gv, 2)))
+
+    return norms
+
+
+@numba.njit
+def divide_where(arr1, arr2, out, wherearr):
+    """Do arr1/arr2.
+    In locations where wherearr == 0, return out instead"""
+    div = np.divide(arr1, arr2)
+    return np.where(wherearr != 0, div, out)
+
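# Usage sketch for divide_where (synthetic values, not part of the patch). It
# mirrors the commented-out np.divide(..., out=..., where=...) call further
# down, since those ufunc keywords are not available inside numba's nopython
# mode; note it divides everywhere first, then substitutes the fallback.
import numpy as np

num = np.array([1.0, 2.0, 3.0])
den = np.array([2.0, 0.0, 4.0])
fallback = np.ones_like(num)

result = divide_where(num, den, out=fallback, wherearr=den)
# result -> [0.5, 1.0, 0.75]; the den == 0 slot takes the fallback value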
 @numba.njit
 def mine(refine_points, all_pbpmap_ubis, ri_col, rj_col, sx_grid, sy_grid,  # refinement stuff
          sc, fc, eta, sum_intensity, sinomega, cosomega, omega, dty, dtyi, xpos,  # icolf columns
          ystep, y0,
+         B0,
          distance, y_center, y_size, tilt_y, z_center, z_size, tilt_z, tilt_x,
          o11, o12, o21, o22,
          t_x, t_y, t_z, wedge, chi, wavelength,
@@ -644,51 +805,42 @@ def mine(refine_points, all_pbpmap_ubis, ri_col, rj_col, sx_grid, sy_grid,  # re
         # but we already have x, y
         local_mask, _ = get_voxel_mask(y0, xi0, yi0, sinomega, cosomega, dty, ystep)
-
-        # compute g-vectors at this voxel
-        this_distance = distance - xpos[local_mask]
-
-        tth_gve, eta_gve = compute_tth_eta(sc[local_mask], fc[local_mask],
-                                           y_center=y_center,
-                                           y_size=y_size,
-                                           tilt_y=tilt_y,
-                                           z_center=z_center,
-                                           z_size=z_size,
-                                           tilt_z=tilt_z,
-                                           tilt_x=tilt_x,
-                                           distance=this_distance,
-                                           o11=o11,
-                                           o12=o12,
-                                           o21=o21,
-                                           o22=o22,
-                                           t_x=t_x,
-                                           t_y=t_y,
-                                           t_z=t_z,
-                                           wedge=wedge,
-                                           chi=chi
-                                           )
-
-        gve = compute_g_vectors(tth_gve, eta_gve,
-                                omega[local_mask],
-                                wavelength,
-                                wedge=wedge,
-                                chi=chi)
+
+        # does our local masking agree?
+        print('First local mask results...')
+        print(sinomega[local_mask].sum())
+
+        gve_voxel = compute_gve(sc[local_mask], fc[local_mask], omega[local_mask], xpos[local_mask],
+                                distance=distance, y_center=y_center, y_size=y_size, tilt_y=tilt_y, z_center=z_center, z_size=z_size, tilt_z=tilt_z, tilt_x=tilt_x,
+                                o11=o11, o12=o12, o21=o21, o22=o22,
+                                t_x=t_x, t_y=t_y, t_z=t_z, wedge=wedge, chi=chi, wavelength=wavelength)
 
         # iterate through the ubis at this voxel
         for ubi_idx in np.arange(ubis_here.shape[2]):
             ubi = ubis_here[:, :, ubi_idx]
-
+
             # assign this UBI to the peaks at this voxel
-            j, labels, drlv2 = score_and_assign(ubi, gve, tol, ubi_idx)
+            j, labels, drlv2 = score_and_assign(ubi, gve_voxel, tol, ubi_idx)
             # print(labels.shape)
             # print(drlv2.shape)
             grain_peak_mask = labels == ubi_idx
+
+            print('Assignment mask results...')
+            print(sinomega[local_mask][grain_peak_mask].sum())
+
             grain_npks = np.sum(grain_peak_mask)
-            print(grain_npks, 'unmerged peaks assigned before merging')
+
+            if ubi_idx == 0:
+                print('In')
+                print('UBI Npks')
+                print(ubi, grain_npks)
+
+            print('Assigned', grain_npks, 'peaks first time out of', local_mask.sum())
 
             # compute hkl floats
-            hkl_double = np.dot(ubi, gve[:, grain_peak_mask])
+            hkl_double = np.dot(ubi, gve_voxel[:, grain_peak_mask])
             # hkl ints
             hkl = np.round(hkl_double)
             etasign = np.sign(eta[local_mask][grain_peak_mask])
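# score_and_assign itself is unchanged by this patch (only its hunk header is
# visible above); a minimal sketch of the scoring convention its call sites
# imply, with all names local to this sketch: drlv2 measures the squared
# distance of each computed hkl from the nearest integer lattice point.
import numpy as np

def score_and_assign_sketch(ubi, gvecs, tol, label):
    hkl_real = ubi.dot(gvecs)                        # (3, N) fractional hkl
    drlv2 = np.sum((hkl_real - np.round(hkl_real)) ** 2, axis=0)
    labels = np.where(drlv2 < tol * tol, label, -1)  # -1 means unassigned
    npks = int(np.sum(labels == label))
    return npks, labels, drlv2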
@@ -700,53 +852,196 @@ def mine(refine_points, all_pbpmap_ubis, ri_col, rj_col, sx_grid, sy_grid,  # re
             merged_sinomega = np.sin(np.radians(merged_omega))
             merged_cosomega = np.cos(np.radians(merged_omega))
 
+            print('Merged', len(xpos[local_mask][grain_peak_mask]), 'peaks to', len(merged_xpos_refined), 'peaks')
+
+            print('Merged peak data for masking...')
+            print('merged so sum', merged_sinomega.sum())
+            print('merged co sum', merged_cosomega.sum())
+            print('merged dty sum', merged_dty.sum())
+
             # print('did merge')
 
-            # re-compute peak mask with merged peaks
+            # re-compute voxel peak mask with merged peaks
             local_mask_of_grain, ydist = get_voxel_mask(y0, xi0, yi0, merged_sinomega, merged_cosomega, merged_dty, ystep)
+            print('Got', local_mask_of_grain.sum(), 'masked merged peaks')
+            print('ydist sum before merged masking', ydist.sum())
+
+            gve_voxel_merged = compute_gve(merged_sc[local_mask_of_grain], merged_fc[local_mask_of_grain], merged_omega[local_mask_of_grain], merged_xpos_refined[local_mask_of_grain],
+                                           distance=distance, y_center=y_center, y_size=y_size, tilt_y=tilt_y, z_center=z_center, z_size=z_size, tilt_z=tilt_z, tilt_x=tilt_x,
+                                           o11=o11, o12=o12, o21=o21, o22=o22,
+                                           t_x=t_x, t_y=t_y, t_z=t_z, wedge=wedge, chi=chi, wavelength=wavelength)
 
-            # compute g-vectors at this voxel again with merged peaks
-            this_distance = distance - merged_xpos_refined[local_mask_of_grain]
-
-            tth_gve_grain, eta_gve_grain = compute_tth_eta(merged_sc[local_mask_of_grain], merged_fc[local_mask_of_grain],
-                                                           y_center=y_center,
-                                                           y_size=y_size,
-                                                           tilt_y=tilt_y,
-                                                           z_center=z_center,
-                                                           z_size=z_size,
-                                                           tilt_z=tilt_z,
-                                                           tilt_x=tilt_x,
-                                                           distance=this_distance,
-                                                           o11=o11,
-                                                           o12=o12,
-                                                           o21=o21,
-                                                           o22=o22,
-                                                           t_x=t_x,
-                                                           t_y=t_y,
-                                                           t_z=t_z,
-                                                           wedge=wedge,
-                                                           chi=chi
-                                                           )
-
-            gve_grain = compute_g_vectors(tth_gve_grain, eta_gve_grain,
-                                          merged_omega[local_mask_of_grain],
-                                          wavelength,
-                                          wedge=wedge,
-                                          chi=chi)
+            print('gve_voxel_merged sum before merged assignment', gve_voxel_merged.sum())
 
             # print('reassigning')
-            # reassign merged g-vectors
-            j, labels, drlv2 = score_and_assign(ubi, gve_grain, tol, ubi_idx)
+            # reassign merged g-vectors with smaller merge_tol
+            j, labels, drlv2 = score_and_assign(ubi, gve_voxel_merged, merge_tol, ubi_idx)
             grain_peak_mask = labels == ubi_idx
             grain_npks = np.sum(grain_peak_mask)
+            print('Assigned', grain_npks, 'merged peaks out of', local_mask_of_grain.sum())
 
             # re-mask g-vectors to those assigned
-            grain_gve = gve_grain[:, grain_peak_mask]
-            hkl_double = np.dot(ubi, grain_gve)
+            gve_grain = gve_voxel_merged[:, grain_peak_mask]
+            hkl_double = np.dot(ubi, gve_grain)
             hkl = np.round(hkl_double)
             grain_ydist = ydist[local_mask_of_grain][grain_peak_mask]
-            print(grain_npks, 'merged peaks assigned after merging')
+            # print(grain_npks, 'merged peaks assigned after merging')
+            # now we're ready to refine!
+
+            if grain_npks > 6:
+
+                print('Going into refinement...')
+                print('ydist sum', grain_ydist.sum())
+                print('gve sum', gve_grain.sum())
+                print('hkl sum', hkl.sum())
+
+                w, ubifit, residuals, rank, sing_vals = weighted_lstsq_ubi_fit(grain_ydist, gve_grain, hkl)
+
+                # check the quality of the fit
+                worth_fitting = (ubifit is not None) and (rank==3) and (np.linalg.cond(ubifit)<1e14) and (np.linalg.det(ubifit) > 0) and (np.linalg.matrix_rank(ubifit)==3)
+
+                # check the unitcell that you create from the UBI
+                if worth_fitting:
+                    ucell = ubi_to_unitcell(ubifit)
+                    _a, _b, _c, al, be, ga = ucell
+
+                    assert _a>0
+                    assert _b>0
+                    assert _c>0
+                    assert al>0 and al<180
+                    assert be>0 and be<180
+                    assert ga>0 and ga<180
+
+                # do we like the quality?
+                if worth_fitting:
+                    ubi_out = ubifit
+                else:
+                    ubi_out = ubi.copy()
+
+                if ubi_idx == 0:
+                    print('Out')
+                    print('UBI Npks')
+                    print(ubi_out, grain_npks)
+
+                # now reassign the (probably refined) UBI
+
+                j, labels, drlv2 = score_and_assign(ubi_out, gve_voxel_merged, merge_tol, ubi_idx)
+                grain_peak_mask = labels == ubi_idx
+                grain_npks = np.sum(grain_peak_mask)
+
+                # if we like the fit quality, redetermine the g-vectors from the new peak mask
+                # then fit the strain tensor
+
+                if worth_fitting:
+                    # go for fancy eps fit
+
+                    print('Strain mask fit sum', grain_peak_mask.sum())
+                    print('B0')
+                    print(B0)
+
+                    gve_grain_strainfit = gve_voxel_merged[:, grain_peak_mask]
+                    hkl_double = np.dot( ubi_out, gve_grain_strainfit )
+                    hkl = np.round ( hkl_double )
+                    ydist = ydist[local_mask_of_grain][grain_peak_mask]
+
+                    print('gve_grain_strainfit sum:', gve_grain_strainfit.sum())
+                    print('ydist strainfit sum:', ydist.sum())
+
+                    # get U from UBI without using ImageD11 grain class
+                    U = ubi_and_ucell_to_u(ubi_out, ucell)
+                    print('U')
+                    print(U)
+                    gve0 = U @ B0 @ hkl
+                    gTg0 = np.sum(gve_grain_strainfit*gve0, axis=0)
+                    gTg = np.sum(gve_grain_strainfit*gve_grain_strainfit, axis=0)
+                    directional_strain = (gTg0/gTg) - 1
+                    print('Directional strain')
+                    print(directional_strain)
+
+                    kappa = gve_grain_strainfit / gve_norm(gve_grain_strainfit)
+                    kx, ky, kz = kappa
+                    # M = np.array( [ kx*kx, ky*ky, kz*kz, 2*kx*ky, 2*kx*kz, 2*ky*kz ] ).T
+                    M = np.column_stack((kx*kx, ky*ky, kz*kz, 2*kx*ky, 2*kx*kz, 2*ky*kz))
+
+                    w = (1. / (ydist + 1) ).reshape(gve_grain_strainfit.shape[1], 1)
+                    # The noise in the directional strain now propagates according to the linear transform
+                    gnoise_std = 1e-4
+                    a = np.sum(gve0*(gnoise_std**2)*gve0, axis=0)
+
+                    strain_noise_std = np.sqrt(divide_where(a, gTg**2, out=np.ones_like(gTg), wherearr=gTg))
+
+                    # strain_noise_std = np.sqrt( np.divide(a, gTg**2, out=np.ones_like(gTg), where=gTg!=0) )
+                    w = w * (1. / strain_noise_std.reshape(w.shape) )
+
+                    w[directional_strain > np.mean(directional_strain) + np.std(directional_strain)*3.5 ] = 0  # outliers
+                    w[directional_strain < np.mean(directional_strain) - np.std(directional_strain)*3.5 ] = 0  # outliers
+
+                    try:
+                        w = w / np.max(w)
+
+                        a = w * M
+                        b = w.flatten() * directional_strain
+                        m, n = a.shape[-2:]
+                        rcond = np.finfo(b.dtype).eps * max(n, m)
+
+                        eps_vec = np.linalg.lstsq( a, b, rcond=rcond )[0].T
+                        sxx, syy, szz, sxy, sxz, syz = eps_vec
+                        eps_tensor = np.array([[sxx, sxy, sxz],[sxy, syy, syz],[sxz, syz, szz]])
+                    except:
+                        pass
+
+                    print(eps_tensor)
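# A round-trip check (not part of the patch) of the directional-strain system
# used in the strain fit above: the strain of a symmetric tensor eps along a
# unit direction kappa is kappa.T @ eps @ kappa, which is exactly one row of
# M @ eps_vec with M = [kx^2, ky^2, kz^2, 2*kx*ky, 2*kx*kz, 2*ky*kz].
# Tensor and directions below are synthetic.
import numpy as np

rng = np.random.default_rng(2)
eps_true = np.array([[ 1e-3,  2e-4, 0.0 ],
                     [ 2e-4, -5e-4, 1e-4],
                     [ 0.0,   1e-4, 3e-4]])
kappa = rng.normal(size=(3, 40))
kappa /= np.linalg.norm(kappa, axis=0)       # unit scattering directions
kx, ky, kz = kappa

eps_k = np.einsum('in,ij,jn->n', kappa, eps_true, kappa)   # directional strains

M = np.column_stack((kx*kx, ky*ky, kz*kz, 2*kx*ky, 2*kx*kz, 2*ky*kz))
sxx, syy, szz, sxy, sxz, syz = np.linalg.lstsq(M, eps_k, rcond=None)[0]
eps_fit = np.array([[sxx, sxy, sxz], [sxy, syy, syz], [sxz, syz, szz]])
assert np.allclose(eps_fit, eps_true)        # exact data -> exact recovery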
+@numba.njit
+def ubi_to_unitcell(ubi):
+    # fast numba version, can't use guvec version from tensor_map.py here unfortunately
+    mt = np.dot(ubi, ubi.T)
+    G = mt
+    a, b, c = np.sqrt(np.diag(G))
+    al = np.degrees(np.arccos(G[1, 2] / b / c))
+    be = np.degrees(np.arccos(G[0, 2] / a / c))
+    ga = np.degrees(np.arccos(G[0, 1] / a / b))
+    return np.array([a, b, c, al, be, ga])
+
+@numba.njit
+def ubi_and_ucell_to_u(ubi, ucell):
+    # compute B
+    a, b, c = ucell[:3]
+    ralpha, rbeta, rgamma = np.radians(ucell[3:])  # radians
+    ca = np.cos(ralpha)
+    cb = np.cos(rbeta)
+    cg = np.cos(rgamma)
+    g = np.full((3, 3), np.nan, float)
+    g[0, 0] = a * a
+    g[0, 1] = a * b * cg
+    g[0, 2] = a * c * cb
+    g[1, 0] = a * b * cg
+    g[1, 1] = b * b
+    g[1, 2] = b * c * ca
+    g[2, 0] = a * c * cb
+    g[2, 1] = b * c * ca
+    g[2, 2] = c * c
+    gi = np.linalg.inv(g)
+    astar, bstar, cstar = np.sqrt(np.diag(gi))
+    betas = np.degrees(np.arccos(gi[0, 2] / astar / cstar))
+    gammas = np.degrees(np.arccos(gi[0, 1] / astar / bstar))
+
+    B = np.zeros((3,3))
+
+    B[0, 0] = astar
+    B[0, 1] = bstar * np.cos(np.radians(gammas))
+    B[0, 2] = cstar * np.cos(np.radians(betas))
+    B[1, 0] = 0.0
+    B[1, 1] = bstar * np.sin(np.radians(gammas))
+    B[1, 2] = -cstar * np.sin(np.radians(betas)) * ca
+    B[2, 0] = 0.0
+    B[2, 1] = 0.0
+    B[2, 2] = 1.0 / c
+
+    u = np.dot(B, ubi).T
+    return u
+
 def call_mine(refine, points_step_space=None):
     # prepare simple numpy array objects
@@ -769,9 +1064,13 @@ def call_mine(refine, points_step_space=None):
 
     pars = refine.icolf.parameters.get_parameters()
 
+    dummy_var = np.eye(3)
+    B0 = unitcell_to_b(refine.ref_ucell.lattice_parameters, dummy_var)
+
     mine(points_recon_space, all_pbpmap_ubis, ri_col, rj_col, refine.sx_grid, refine.sy_grid,
          refine.icolf.sc, refine.icolf.fc, refine.icolf.eta, refine.icolf.sum_intensity, refine.icolf.sinomega, refine.icolf.cosomega, refine.icolf.omega, refine.icolf.dty, refine.icolf.dtyi, refine.icolf.xpos_refined,
          refine.ystep, refine.y0,
+         B0,
          pars['distance'], pars['y_center'], pars['y_size'], pars['tilt_y'], pars['z_center'], pars['z_size'], pars['tilt_z'], pars['tilt_x'],
          pars['o11'], pars['o12'], pars['o21'], pars['o22'],
          pars['t_x'], pars['t_y'], pars['t_z'], pars['wedge'], pars['chi'], pars['wavelength']
@@ -835,6 +1134,8 @@ def load_data():
     refine.mask = np.load('mask.npy')
     refine.singlemap = np.load('singlemap.npy')
     ds.update_colfile_pars(refine.icolf, phase_name=phase_str)
+    refine.ref_ucell = refine.dset.phases.unitcells[phase_str]
+
     print('Data loaded!')
@@ -843,8 +1144,8 @@ def load_data():
 def main():
     refine = load_data()
-    points = [(344, -212), (323, 147), (-228, 303)]
-    # points = [(344, -212)]
+    # points = [(344, -212), (323, 147), (-228, 303)]
+    points = [(344, -212)]
     test_funcs(refine, points)
     # test_funcs(args, [10,50,100,500,1000,5000,10000])
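# A sanity check (not part of the patch) for ubi_to_unitcell above: in the
# ImageD11 convention the rows of UBI are the real-space lattice vectors, so
# the metric tensor G = UBI @ UBI.T is rotation-invariant and the cell falls
# out of its diagonal and off-diagonal terms. Cell and rotation are synthetic.
import numpy as np

a0 = 4.04                                    # cubic cell edge, Angstrom
th = np.radians(20.0)
R = np.array([[np.cos(th), -np.sin(th), 0.0],
              [np.sin(th),  np.cos(th), 0.0],
              [0.0,         0.0,        1.0]])
ubi = (np.eye(3) * a0) @ R.T                 # same lattice, rotated in the lab

G = ubi @ ubi.T                              # rotation cancels: R.T @ R = I
a, b, c = np.sqrt(np.diag(G))
al = np.degrees(np.arccos(G[1, 2] / b / c))
# a == b == c == 4.04, and al (like be, ga) comes out exactly 90 degrees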