diff --git a/docs/user-guide/amor/amor-reduction.ipynb b/docs/user-guide/amor/amor-reduction.ipynb index 54461ab1..22843087 100644 --- a/docs/user-guide/amor/amor-reduction.ipynb +++ b/docs/user-guide/amor/amor-reduction.ipynb @@ -44,23 +44,7 @@ "## Create and configure the workflow\n", "\n", "We begin by creating the Amor workflow object which is a skeleton for reducing Amor data,\n", - "with pre-configured steps." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "workflow = amor.AmorWorkflow()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We then need to set the missing parameters which are specific to each experiment:" + "with pre-configured steps, and then set the missing parameters which are specific to each experiment:" ] }, { @@ -69,6 +53,7 @@ "metadata": {}, "outputs": [], "source": [ + "workflow = amor.AmorWorkflow()\n", "workflow[SampleSize[SampleRun]] = sc.scalar(10.0, unit='mm')\n", "workflow[SampleSize[ReferenceRun]] = sc.scalar(10.0, unit='mm')\n", "\n", @@ -76,13 +61,14 @@ "workflow[ChopperPhase[SampleRun]] = sc.scalar(-7.5, unit='deg')\n", "\n", "workflow[QBins] = sc.geomspace(dim='Q', start=0.005, stop=0.3, num=391, unit='1/angstrom')\n", - "workflow[WavelengthBins] = sc.geomspace('wavelength', 2.8, 12, 301, unit='angstrom')\n", + "workflow[WavelengthBins] = sc.geomspace('wavelength', 2.8, 12.5, 2001, unit='angstrom')\n", "\n", "# The YIndexLimits and ZIndexLimits define ranges on the detector where\n", "# data is considered to be valid signal.\n", "# They represent the lower and upper boundaries of a range of pixel indices.\n", - "workflow[YIndexLimits] = sc.scalar(11, unit=None), sc.scalar(41, unit=None)\n", - "workflow[ZIndexLimits] = sc.scalar(80, unit=None), sc.scalar(370, unit=None)" + "workflow[YIndexLimits] = sc.scalar(11), sc.scalar(41)\n", + "workflow[ZIndexLimits] = sc.scalar(80), sc.scalar(370)\n", + "workflow[BeamDivergenceLimits] = sc.scalar(-0.75, unit='deg'), sc.scalar(0.75, unit='deg')" ] }, { @@ -116,9 +102,9 @@ "# The sample rotation value in the file is slightly off, so we set it manually\n", "workflow[SampleRotation[ReferenceRun]] = sc.scalar(0.65, unit='deg')\n", "\n", - "reference_result = workflow.compute(IdealReferenceIntensity)\n", + "reference_result = workflow.compute(ReducedReference)\n", "# Set the result back onto the pipeline to cache it\n", - "workflow[IdealReferenceIntensity] = reference_result" + "workflow[ReducedReference] = reference_result" ] }, { @@ -198,6 +184,7 @@ " },\n", "}\n", "\n", + "\n", "reflectivity = {}\n", "for run_number, params in runs.items():\n", " workflow[Filename[SampleRun]] = params[Filename[SampleRun]]\n", @@ -282,11 +269,20 @@ "for run_number, params in runs.items():\n", " workflow[Filename[SampleRun]] = params[Filename[SampleRun]]\n", " workflow[SampleRotation[SampleRun]] = params[SampleRotation[SampleRun]]\n", - " diagnostics[run_number] = workflow.compute((ReflectivityData, ThetaBins[SampleRun]))\n", + " diagnostics[run_number] = workflow.compute((ReflectivityOverZW, ThetaBins[SampleRun]))\n", "\n", "# Scale the results using the scale factors computed earlier\n", "for run_number, scale_factor in zip(reflectivity.keys(), scale_factors, strict=True):\n", - " diagnostics[run_number][ReflectivityData] *= scale_factor" + " diagnostics[run_number][ReflectivityOverZW] *= scale_factor" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + 
"diagnostics['608'][ReflectivityOverZW].hist().flatten(('blade', 'wire'), to='z').plot(norm='log')" ] }, { @@ -306,7 +302,7 @@ "from ess.amor.figures import wavelength_theta_figure\n", "\n", "wavelength_theta_figure(\n", - " [result[ReflectivityData] for result in diagnostics.values()],\n", + " [result[ReflectivityOverZW] for result in diagnostics.values()],\n", " theta_bins=[result[ThetaBins[SampleRun]] for result in diagnostics.values()],\n", " q_edges_to_display=(sc.scalar(0.018, unit='1/angstrom'), sc.scalar(0.113, unit='1/angstrom'))\n", ")" @@ -336,7 +332,7 @@ "from ess.amor.figures import q_theta_figure\n", "\n", "q_theta_figure(\n", - " [res[ReflectivityData] for res in diagnostics.values()],\n", + " [res[ReflectivityOverZW] for res in diagnostics.values()],\n", " theta_bins=[res[ThetaBins[SampleRun]] for res in diagnostics.values()],\n", " q_bins=workflow.compute(QBins)\n", ")" @@ -364,11 +360,11 @@ "workflow[Filename[SampleRun]] = runs['608'][Filename[SampleRun]]\n", "workflow[SampleRotation[SampleRun]] = runs['608'][SampleRotation[SampleRun]]\n", "wavelength_z_figure(\n", - " workflow.compute(FootprintCorrectedData[SampleRun]),\n", + " workflow.compute(Sample),\n", " wavelength_bins=workflow.compute(WavelengthBins),\n", " grid=False\n", ") + wavelength_z_figure(\n", - " reference_result,\n", + " workflow.compute(Reference),\n", " grid=False\n", ")" ] @@ -455,24 +451,6 @@ ")" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To support tracking provenance, we also list the corrections that were done by the workflow and store them in the dataset:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "iofq_dataset.info.reduction.corrections = orso.find_corrections(\n", - " workflow.get(orso.OrsoIofQDataset)\n", - ")" - ] - }, { "cell_type": "markdown", "metadata": {}, diff --git a/docs/user-guide/amor/compare-to-eos.ipynb b/docs/user-guide/amor/compare-to-eos.ipynb index c814ea6f..fd861f24 100644 --- a/docs/user-guide/amor/compare-to-eos.ipynb +++ b/docs/user-guide/amor/compare-to-eos.ipynb @@ -85,15 +85,12 @@ "workflow[SampleSize[ReferenceRun]] = sc.scalar(10.0, unit='mm')\n", "workflow[ChopperPhase[SampleRun]] = sc.scalar(-7.5, unit='deg')\n", "\n", - "# In Jochens workflow:\n", - "# * divergence mask: [-0.7, 0.7]\n", - "# * note, divergence relative to beam center\n", "workflow[WavelengthBins] = sc.geomspace('wavelength', 2.8, 12.5, 301, unit='angstrom')\n", "workflow[QBins] = sc.geomspace(\n", " dim='Q', start=0.00505, stop=2.93164766e-01, num=391, unit='1/angstrom'\n", ")\n", - "workflow[YIndexLimits] = sc.scalar(11, unit=None), sc.scalar(41, unit=None)\n", - "workflow[ZIndexLimits] = sc.scalar(80, unit=None), sc.scalar(370, unit=None)\n", + "workflow[YIndexLimits] = sc.scalar(11), sc.scalar(41)\n", + "workflow[ZIndexLimits] = sc.scalar(80), sc.scalar(370)\n", "\n", "# Chopper phase value in the file is wrong, so we set it manually\n", "workflow[ChopperPhase[ReferenceRun]] = sc.scalar(-7.5, unit='deg')\n", @@ -101,9 +98,9 @@ "workflow[SampleRotation[ReferenceRun]] = sc.scalar(0.65, unit='deg')\n", "workflow[Filename[ReferenceRun]] = amor.data.amor_reference_run()\n", "\n", - "reference_result = workflow.compute(IdealReferenceIntensity)\n", + "reference_result = workflow.compute(ReducedReference)\n", "# Set the result back onto the pipeline to cache it\n", - "workflow[IdealReferenceIntensity] = reference_result" + "workflow[ReducedReference] = reference_result" ] }, { diff --git 
a/src/ess/amor/__init__.py b/src/ess/amor/__init__.py index 0290e6ba..1ef6d1e5 100644 --- a/src/ess/amor/__init__.py +++ b/src/ess/amor/__init__.py @@ -5,23 +5,30 @@ import sciline import scipp as sc + from ..reflectometry import providers as reflectometry_providers from ..reflectometry import supermirror from ..reflectometry.types import ( BeamSize, DetectorSpatialResolution, - Gravity, NeXusDetectorName, RunType, SamplePosition, BeamDivergenceLimits, ) -from . import conversions, load, orso, resolution, utils, figures +from . import ( + conversions, + load, + orso, + resolution, + utils, + figures, + normalization, + workflow, +) from .instrument_view import instrument_view from .types import ( AngularResolution, - Chopper1Position, - Chopper2Position, ChopperFrequency, ChopperPhase, QThetaFigure, @@ -42,10 +49,11 @@ *reflectometry_providers, *load.providers, *conversions.providers, - *resolution.providers, + *normalization.providers, *utils.providers, *figures.providers, *orso.providers, + *workflow.providers, ) """ List of providers for setting up a Sciline pipeline. @@ -61,12 +69,14 @@ def default_parameters() -> dict: supermirror.Alpha: sc.scalar(0.25 / 0.088, unit=sc.units.angstrom), BeamSize[RunType]: 2.0 * sc.units.mm, DetectorSpatialResolution[RunType]: 0.0025 * sc.units.m, - Gravity: sc.vector(value=[0, -1, 0]) * sc.constants.g, SamplePosition[RunType]: sc.vector([0, 0, 0], unit="m"), NeXusDetectorName[RunType]: "detector", ChopperPhase[RunType]: sc.scalar(7.0, unit="deg"), ChopperFrequency[RunType]: sc.scalar(8.333, unit="Hz"), - BeamDivergenceLimits: (sc.scalar(-0.7, unit='deg'), sc.scalar(0.7, unit='deg')), + BeamDivergenceLimits: ( + sc.scalar(-0.75, unit='deg'), + sc.scalar(0.75, unit='deg'), + ), } diff --git a/src/ess/amor/conversions.py b/src/ess/amor/conversions.py index 6d8c99f9..17fcc302 100644 --- a/src/ess/amor/conversions.py +++ b/src/ess/amor/conversions.py @@ -1,24 +1,173 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright (c) 2023 Scipp contributors (https://github.com/scipp) +import numpy as np import scipp as sc -from ..reflectometry.types import IncidentBeam, RunType, SamplePosition -from .types import Chopper1Position, Chopper2Position +from ..reflectometry.conversions import reflectometry_q +from ..reflectometry.types import ( + BeamDivergenceLimits, + WavelengthBins, + YIndexLimits, + ZIndexLimits, +) +from .geometry import Detector +from .types import CoordTransformationGraph -def incident_beam( - source_chopper_1_position: Chopper1Position[RunType], - source_chopper_2_position: Chopper2Position[RunType], - sample_position: SamplePosition[RunType], -) -> IncidentBeam[RunType]: +def theta(wavelength, divergence_angle, L2, sample_rotation, detector_rotation): + ''' + Angle of reflection. + + Computes the angle between the scattering direction of + the neutron and the sample surface. + + :math:`\\gamma^*` denotes the angle between the scattering direction + and the horizontal plane. + :math:`\\gamma` denotes the angle between the ray from sample position + to detection position + and the horizontal plane. + :math:`L_2` is the length of the ray from sample position to detector position. + :math:`v` is the velocity of the neutron at the sample. + :math:`t` is the travel time from sample to detector. + + The parabolic trajectory of the neutron satisfies + + .. math:: + + \\sin(\\gamma) L_2 = \\sin(\\gamma^*) v t - \\frac{g}{2} t^2 + + and + + .. math:: + + \\cos(\\gamma) L_2 = \\cos(\\gamma^*) vt + + where :math:`g` is the gravitational acceleration. 
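+    (For a sense of scale: with :math:`L_2 \\approx 4` m and :math:`\\lambda = 10` Å,
+    i.e. :math:`v \\approx 400` m/s, the neutron drops by about 0.5 mm on its way
+    to the detector, so the correction to the angle is of order :math:`10^{-4}` rad.)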
+ + The second equation tells us that the approximation :math:`L_2=vt` + will have a small error if :math:`\\gamma` is close to 0 and + the difference between :math:`\\gamma` and :math:`\\gamma^*` is small. + + Using this approximation we can solve the first equation, + and by expressing :math:`v` in terms of the wavelength we get + + .. math:: + + \\sin(\\gamma^*) = + \\sin(\\gamma) + \\frac{g}{2} \\frac{L_2 \\lambda^2 m_n^2}{h^2}. + + Finally, the scattering angle is obtained by subtracting the sample rotation + relative to the horizontal plane. + ''' + c = sc.constants.g * sc.constants.m_n**2 / sc.constants.h**2 + out = (c * L2 * wavelength**2).to(unit='dimensionless') + sc.sin( + divergence_angle.to(unit='rad', copy=False) + detector_rotation.to(unit='rad') + ) + out = sc.asin(out, out=out) + out -= sample_rotation.to(unit='rad') + return out + + +def angle_of_divergence(theta, sample_rotation, angle_to_center_of_beam): + """ + Difference between the angle of incidence and the angle to the center + of the incident beam. + Useful for filtering out parts of the beam with too high divergence. + + On the Amor instrument this is always in the interval [-0.75 deg, 0.75 deg], + but the divergence of the incident beam can be made lower. + """ + return ( + theta.to(unit='rad', copy=False) + - sample_rotation.to(unit='rad') + - angle_to_center_of_beam.to(unit='rad', copy=False) + ) + + +def wavelength( + event_time_offset, divergence_angle, L1, L2, chopper_phase, chopper_frequency +): + "Converts event_time_offset to wavelength using the chopper settings." + out = event_time_offset.to(unit="ns", dtype="float64", copy=True) + unit = out.bins.unit + tau = (1 / (2 * chopper_frequency.to(unit='Hz'))).to(unit=unit) + tof_offset = tau * chopper_phase.to(unit='rad') / (np.pi * sc.units.rad) + + minimum = -tof_offset + frame_bound = tau - tof_offset + maximum = 2 * tau - tof_offset + + # Frame unwrapping + out += sc.where( + (minimum < event_time_offset) & (event_time_offset < frame_bound), + tof_offset, + sc.where( + (frame_bound < event_time_offset) & (event_time_offset < maximum), + tof_offset - tau, + sc.scalar(np.nan, unit=unit), + ), + ) + # Correction for path length through guides being different + # depending on incident angle. + out -= (divergence_angle.to(unit="rad") / (np.pi * sc.units.rad)) * tau + out *= (sc.constants.h / sc.constants.m_n) / (L1 + L2) + return out.to(unit='angstrom', copy=False) + + +def coordinate_transformation_graph() -> CoordTransformationGraph: + return { + "divergence_angle": "pixel_divergence_angle", + "wavelength": wavelength, + "theta": theta, + "angle_of_divergence": angle_of_divergence, + "Q": reflectometry_q, + "L1": lambda chopper_distance: sc.abs(chopper_distance), + "L2": lambda distance_in_detector: distance_in_detector + Detector.distance, + } + + +def add_coords( + da: sc.DataArray, + graph: dict, +) -> sc.DataArray: + "Adds scattering coordinates to the raw detector data." + return da.transform_coords( + ("wavelength", "theta", "angle_of_divergence", "Q", "L1", "L2"), + graph, + rename_dims=False, + keep_intermediate=False, + keep_aliases=False, + ) + + +def _not_between(v, a, b): + return (v < a) | (v > b) + + +def add_masks( + da: sc.DataArray, + ylim: YIndexLimits, + zlims: ZIndexLimits, + bdlim: BeamDivergenceLimits, + wbins: WavelengthBins, +): """ - Compute the incident beam vector from the source chopper position vector, - instead of the source_position vector. 
+ Masks the data by ranges in the detector + coordinates ``z`` and ``y``, and by the divergence of the beam, + and by wavelength. """ - chopper_midpoint = ( - source_chopper_1_position + source_chopper_2_position - ) * sc.scalar(0.5) - return sample_position - chopper_midpoint + da.masks["stripe_range"] = _not_between(da.coords["stripe"], *ylim) + da.masks['z_range'] = _not_between(da.coords["z_index"], *zlims) + da.bins.masks["divergence_too_large"] = _not_between( + da.bins.coords["angle_of_divergence"], + bdlim[0].to(unit=da.bins.coords["angle_of_divergence"].bins.unit), + bdlim[1].to(unit=da.bins.coords["angle_of_divergence"].bins.unit), + ) + da.bins.masks['wavelength'] = _not_between( + da.bins.coords['wavelength'], + wbins[0], + wbins[-1], + ) + return da -providers = (incident_beam,) +providers = (coordinate_transformation_graph,) diff --git a/src/ess/amor/figures.py b/src/ess/amor/figures.py index bc7d7c7b..5fcd1643 100644 --- a/src/ess/amor/figures.py +++ b/src/ess/amor/figures.py @@ -6,8 +6,8 @@ from ess.reflectometry.types import ( QBins, - ReflectivityData, ReflectivityOverQ, + ReflectivityOverZW, SampleRun, ) @@ -74,16 +74,18 @@ def wavelength_theta_figure( **kwargs, ): ''' - Creates a figure displaying a histogram over :math:`\\theta` and :math:`\\lambda`. + Creates a figure displaying a histogram over :math:`\\theta` + and :math:`\\lambda`. The input can either be a single data array containing the data to display, or a sequence of data arrays. - The inputs must either have coordinates called "theta" and "wavelength", - or they must be histograms with dimensions "theta" and "wavelength". + The inputs must either have coordinates called "theta" + and "wavelength", or they must be histograms with dimensions + "theta" and "wavelength". - If :code:`wavelength_bins` or :code:`theta_bins` are provided, they are used - to construct the histogram. If not provided, the function uses the + If :code:`wavelength_bins` or :code:`theta_bins` are provided, + they are used to construct the histogram. If not provided, the function uses the bin edges that already exist on the data arrays. If :code:`q_edges_to_display` is provided, lines will be drawn in the figure @@ -163,7 +165,8 @@ def q_theta_figure( **kwargs, ): ''' - Creates a figure displaying a histogram over :math:`\\theta` and :math:`Q`. + Creates a figure displaying a histogram over :math:`\\theta` + and :math:`Q`. The input can either be a single data array containing the data to display, or a sequence of data arrays. 
@@ -191,7 +194,10 @@ def q_theta_figure( if isinstance(da, sc.DataArray): return q_theta_figure( - (da,), q_bins=(q_bins,), theta_bins=(theta_bins,), **kwargs + (da,), + q_bins=(q_bins,), + theta_bins=(theta_bins,), + **kwargs, ) q_bins, theta_bins = ( @@ -273,14 +279,14 @@ def wavelength_z_figure( def wavelength_theta_diagnostic_figure( - da: ReflectivityData, + da: ReflectivityOverZW, thbins: ThetaBins[SampleRun], ) -> WavelengthThetaFigure: return wavelength_theta_figure(da, theta_bins=thbins) def q_theta_diagnostic_figure( - da: ReflectivityData, + da: ReflectivityOverZW, thbins: ThetaBins[SampleRun], qbins: QBins, ) -> QThetaFigure: @@ -288,7 +294,7 @@ def q_theta_diagnostic_figure( def wavelength_z_diagnostic_figure( - da: ReflectivityData, + da: ReflectivityOverZW, ) -> WavelengthZIndexFigure: return wavelength_z_figure(da) diff --git a/src/ess/amor/geometry.py b/src/ess/amor/geometry.py index 6134b44c..e9ee834e 100644 --- a/src/ess/amor/geometry.py +++ b/src/ess/amor/geometry.py @@ -26,49 +26,47 @@ class Detector: distance = sc.scalar(4000, unit="mm") -def _pixel_coordinate_in_detector_system( - pixelID: sc.Variable, -) -> tuple[sc.Variable, sc.Variable]: +def pixel_coordinates_in_detector_system() -> tuple[sc.Variable, sc.Variable]: """Determines beam travel distance inside the detector and the beam divergence angle from the detector number.""" - (bladeNr, bPixel) = ( - pixelID // (Detector.nWires * Detector.nStripes), - pixelID % (Detector.nWires * Detector.nStripes), + pixels = sc.DataArray( + sc.arange( + 'row', + 1, + ( + Detector.nBlades * Detector.nWires * Detector.nStripes + sc.scalar(1) + ).values, + unit=None, + ).fold( + 'row', + sizes={ + 'blade': Detector.nBlades, + 'wire': Detector.nWires, + 'stripe': Detector.nStripes, + }, + ), + coords={ + 'blade': sc.arange('blade', sc.scalar(0), Detector.nBlades), + 'wire': sc.arange('wire', sc.scalar(0), Detector.nWires), + 'stripe': sc.arange('stripe', sc.scalar(0), Detector.nStripes), + }, ) - # z index on blade, y index on detector - bZi = bPixel // Detector.nStripes # x position in detector # TODO: check with Jochen if this is correct, as old code was: # detX = bZi * Detector.dX - distance_inside_detector = (Detector.nWires - 1 - bZi) * Detector.dX - - bladeAngle = (2.0 * sc.asin(0.5 * Detector.bladeZ / Detector.distance)).to( - unit="degree" - ) - beam_divergence_angle = (Detector.nBlades / 2.0 - bladeNr) * bladeAngle - ( - sc.atan(bZi * Detector.dZ / (Detector.distance + bZi * Detector.dX)) - ).to(unit="degree") - return distance_inside_detector, beam_divergence_angle - - -def pixel_coordinate_in_lab_frame( - pixelID: sc.Variable, nu: sc.Variable -) -> tuple[sc.Variable, sc.Variable]: - """Computes spatial coordinates (lab reference frame), and the beam divergence - angle for the detector pixel associated with `pixelID`""" - distance_in_detector, divergence_angle = _pixel_coordinate_in_detector_system( - pixelID + pixels.coords['distance_in_detector'] = ( + Detector.nWires - 1 - pixels.coords['wire'] + ) * Detector.dX + bladeAngle = 2.0 * sc.asin(0.5 * Detector.bladeZ / Detector.distance) + pixels.coords['pixel_divergence_angle'] = ( + (Detector.nBlades / 2.0 - pixels.coords['blade']) * bladeAngle + - sc.atan( + pixels.coords['wire'] + * Detector.dZ + / (Detector.distance + pixels.coords['wire'] * Detector.dX) + ) + ).to(unit='rad') + pixels.coords['z_index'] = ( + Detector.nWires * pixels.coords['blade'] + pixels.coords['wire'] ) - - angle_to_horizon = (nu + divergence_angle).to(unit="rad") - distance_to_pixel = 
distance_in_detector + Detector.distance - - global_Y = distance_to_pixel * sc.sin(angle_to_horizon) - global_Z = distance_to_pixel * sc.cos(angle_to_horizon) - # TODO: the values for global_X are right now just an estimate. We should check with - # the instrument scientist what the actual values are. The X positions are ignored - # in the coordinate transformation, so this is not critical. - global_X = sc.zeros_like(global_Z) + sc.linspace( - "stripe", -0.1, 0.1, global_Z.sizes["stripe"], unit="m" - ).to(unit=global_Z.unit) - return sc.spatial.as_vectors(global_X, global_Y, global_Z), divergence_angle + return pixels diff --git a/src/ess/amor/load.py b/src/ess/amor/load.py index bc0cbcb2..9737e78b 100644 --- a/src/ess/amor/load.py +++ b/src/ess/amor/load.py @@ -4,21 +4,23 @@ from ..reflectometry.load import load_nx from ..reflectometry.types import ( + BeamSize, DetectorRotation, Filename, LoadedNeXusDetector, NeXusDetectorName, RawDetectorData, - ReducibleDetectorData, RunType, SampleRotation, + SampleSize, ) -from .geometry import Detector, pixel_coordinate_in_lab_frame +from .geometry import pixel_coordinates_in_detector_system from .types import ( - Chopper1Position, - Chopper2Position, + AngleCenterOfIncomingToHorizon, + ChopperDistance, ChopperFrequency, ChopperPhase, + ChopperSeparation, RawChopper, ) @@ -30,98 +32,55 @@ def load_detector( def load_events( - detector: LoadedNeXusDetector[RunType], detector_rotation: DetectorRotation[RunType] + detector: LoadedNeXusDetector[RunType], + detector_rotation: DetectorRotation[RunType], + sample_rotation: SampleRotation[RunType], + chopper_phase: ChopperPhase[RunType], + chopper_frequency: ChopperFrequency[RunType], + chopper_distance: ChopperDistance[RunType], + chopper_separation: ChopperSeparation[RunType], + sample_size: SampleSize[RunType], + beam_size: BeamSize[RunType], + angle_to_center_of_beam: AngleCenterOfIncomingToHorizon[RunType], ) -> RawDetectorData[RunType]: - detector_numbers = sc.arange( - "event_id", - start=1, - stop=(Detector.nBlades * Detector.nWires * Detector.nStripes).value + 1, - unit=None, - dtype="int32", - ) + detector_numbers = pixel_coordinates_in_detector_system() data = ( - detector['data'] + detector["data"] .bins.constituents["data"] - .group(detector_numbers) - .fold( - "event_id", - sizes={ - "blade": Detector.nBlades, - "wire": Detector.nWires, - "stripe": Detector.nStripes, - }, - ) + .group(detector_numbers.data.flatten(to='event_id')) + .fold("event_id", sizes=detector_numbers.sizes) ) - # Recent versions of scippnexus no longer add variances for events by default, so - # we add them here if they are missing. 
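+    # Attach the per-pixel logical coordinates computed from the detector
+    # geometry (blade, wire and stripe indices, z_index, and the
+    # pixel_divergence_angle used later in the coordinate transformations).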
+ data.coords.update(detector_numbers.coords) + if data.bins.constituents["data"].data.variances is None: data.bins.constituents["data"].data.variances = data.bins.constituents[ "data" ].data.values - pixel_inds = sc.array(dims=data.dims, values=data.coords["event_id"].values - 1) - position, angle_from_center_of_beam = pixel_coordinate_in_lab_frame( - pixelID=pixel_inds, nu=detector_rotation - ) - data.coords["position"] = position.to(unit="m", copy=False) - data.coords["angle_from_center_of_beam"] = angle_from_center_of_beam + data.coords["sample_rotation"] = sample_rotation.to(unit='rad') + data.coords["detector_rotation"] = detector_rotation.to(unit='rad') + data.coords["chopper_phase"] = chopper_phase + data.coords["chopper_frequency"] = chopper_frequency + data.coords["chopper_separation"] = chopper_separation + data.coords["chopper_distance"] = chopper_distance + data.coords["sample_size"] = sample_size + data.coords["beam_size"] = beam_size + data.coords["angle_to_center_of_beam"] = angle_to_center_of_beam.to(unit='rad') return RawDetectorData[RunType](data) -def compute_tof( - data: RawDetectorData[RunType], - phase: ChopperPhase[RunType], - frequency: ChopperFrequency[RunType], -) -> ReducibleDetectorData[RunType]: - data.bins.coords["tof"] = data.bins.coords.pop("event_time_offset").to( - unit="ns", dtype="float64", copy=False - ) - - tof_unit = data.bins.coords["tof"].bins.unit - tau = sc.to_unit(1 / (2 * frequency), tof_unit) - tof_offset = tau * phase / (180.0 * sc.units.deg) - - event_time_offset = data.bins.coords["tof"] - - minimum = -tof_offset - frame_bound = tau - tof_offset - maximum = 2 * tau - tof_offset - - offset = sc.where( - (minimum < event_time_offset) & (event_time_offset < frame_bound), - tof_offset, - sc.where( - (frame_bound < event_time_offset) & (event_time_offset < maximum), - tof_offset - tau, - 0.0 * tof_unit, - ), - ) - data.bins.masks["outside_of_pulse"] = (minimum > event_time_offset) | ( - event_time_offset > maximum - ) - data.bins.coords["tof"] += offset - data.bins.coords["tof"] -= ( - data.coords["angle_from_center_of_beam"].to(unit="deg") / (180.0 * sc.units.deg) - ) * tau - return ReducibleDetectorData[RunType](data) - - def amor_chopper(f: Filename[RunType]) -> RawChopper[RunType]: return next(load_nx(f, "NXentry/NXinstrument/NXdisk_chopper")) -def load_amor_chopper_1_position(ch: RawChopper[RunType]) -> Chopper1Position[RunType]: +def load_amor_chopper_distance(ch: RawChopper[RunType]) -> ChopperDistance[RunType]: # We know the value has unit 'mm' - return sc.vector([0, 0, ch["distance"] - ch["pair_separation"] / 2], unit="mm").to( - unit="m" - ) + return sc.scalar(ch["distance"], unit="mm") -def load_amor_chopper_2_position(ch: RawChopper[RunType]) -> Chopper2Position[RunType]: +def load_amor_chopper_separation(ch: RawChopper[RunType]) -> ChopperSeparation[RunType]: # We know the value has unit 'mm' - return sc.vector([0, 0, ch["distance"] + ch["pair_separation"] / 2], unit="mm").to( - unit="m" - ) + return sc.scalar(ch["pair_separation"], unit="mm") def load_amor_ch_phase(ch: RawChopper[RunType]) -> ChopperPhase[RunType]: @@ -154,15 +113,28 @@ def load_amor_detector_rotation(fp: Filename[RunType]) -> DetectorRotation[RunTy return sc.scalar(nu['value'].data['dim_1', 0]['time', 0].value, unit='deg') +def load_amor_angle_from_horizon_to_center_of_incident_beam( + fp: Filename[RunType], +) -> AngleCenterOfIncomingToHorizon[RunType]: + (kad,) = load_nx(fp, "NXentry/NXinstrument/master_parameters/kad") + natural_incident_angle = 
sc.scalar(0.245, unit='deg') + # This value should not change during the run. + # If it does we assume the change was too small to be relevant. + # Therefore only the first value is read from the log. + return natural_incident_angle + sc.scalar( + kad['value'].data['dim_1', 0]['time', 0].value, unit='deg' + ) + + providers = ( load_detector, load_events, - compute_tof, load_amor_ch_frequency, load_amor_ch_phase, - load_amor_chopper_1_position, - load_amor_chopper_2_position, + load_amor_chopper_distance, + load_amor_chopper_separation, load_amor_sample_rotation, load_amor_detector_rotation, + load_amor_angle_from_horizon_to_center_of_incident_beam, amor_chopper, ) diff --git a/src/ess/amor/normalization.py b/src/ess/amor/normalization.py new file mode 100644 index 00000000..dff6f951 --- /dev/null +++ b/src/ess/amor/normalization.py @@ -0,0 +1,113 @@ +import scipp as sc + +from ..reflectometry.conversions import reflectometry_q +from ..reflectometry.supermirror import ( + Alpha, + CriticalEdge, + MValue, + supermirror_reflectivity, +) +from ..reflectometry.types import ( + DetectorSpatialResolution, + QBins, + ReducedReference, + ReducibleData, + Reference, + Sample, + SampleRun, +) +from .conversions import theta +from .resolution import ( + angular_resolution, + q_resolution, + sample_size_resolution, + wavelength_resolution, +) +from .types import CoordTransformationGraph + + +def mask_events_where_supermirror_does_not_cover( + sam: ReducibleData[SampleRun], + ref: ReducedReference, + critical_edge: CriticalEdge, + mvalue: MValue, + alpha: Alpha, +) -> Sample: + """ + Mask events in regions of the detector the reference does not cover. + + Regions of the detector that the reference + measurement doesn't cover cannot be used to compute reflectivity. + + Preferably the reference measurement should cover the entire + detector, but sometimes that is not possible, for example + if the supermirror :math:`M` value was too limited or because the reference + was measured at too high angle. + + To figure out what events need to be masked, + compute the supermirror reflectivity as a function + of the :math:`Q` the event would have had if it had belonged to + the reference measurement. + """ + R = supermirror_reflectivity( + reflectometry_q( + sam.bins.coords["wavelength"], + theta( + sam.bins.coords["wavelength"], + sam.coords["pixel_divergence_angle"], + sam.coords["L2"], + ref.coords["sample_rotation"], + ref.coords["detector_rotation"], + ), + ), + c=critical_edge, + m=mvalue, + alpha=alpha, + ) + sam.bins.masks["supermirror_does_not_cover"] = sc.isnan(R) + return sam + + +def evaluate_reference_at_sample_coords( + reference: ReducedReference, + sample: ReducibleData[SampleRun], + qbins: QBins, + detector_spatial_resolution: DetectorSpatialResolution[SampleRun], + graph: CoordTransformationGraph, +) -> Reference: + """ + Adds a :math:`Q` and :math:`Q`-resolution coordinate to each bin of the ideal + intensity distribution. The coordinates are computed as if the data came from + the sample measurement, that is, they use the ``sample_rotation`` + and ``detector_rotation`` parameters from the sample measurement. 
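+    The reference intensities themselves are unchanged; only the coordinates
+    are recomputed as if the reference had been measured with the sample geometry.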
+ """ + ref = reference.copy() + ref.coords["sample_rotation"] = sample.coords["sample_rotation"] + ref.coords["detector_rotation"] = sample.coords["detector_rotation"] + ref.coords["sample_size"] = sample.coords["sample_size"] + ref.coords["detector_spatial_resolution"] = detector_spatial_resolution + ref.coords["wavelength"] = sc.midpoints(ref.coords["wavelength"]) + ref = ref.transform_coords( + ( + "Q", + "wavelength_resolution", + "sample_size_resolution", + "angular_resolution", + "Q_resolution", + ), + { + **graph, + "wavelength_resolution": wavelength_resolution, + "sample_size_resolution": sample_size_resolution, + "angular_resolution": angular_resolution, + "Q_resolution": q_resolution, + }, + rename_dims=False, + ) + return sc.values(ref) + + +providers = ( + evaluate_reference_at_sample_coords, + mask_events_where_supermirror_does_not_cover, +) diff --git a/src/ess/amor/orso.py b/src/ess/amor/orso.py index 970f2a09..b614539a 100644 --- a/src/ess/amor/orso.py +++ b/src/ess/amor/orso.py @@ -14,7 +14,7 @@ OrsoIofQDataset, OrsoReduction, ) -from ..reflectometry.types import QResolution, ReflectivityOverQ +from ..reflectometry.types import ReflectivityOverQ def build_orso_instrument(events: ReflectivityOverQ) -> OrsoInstrument: @@ -34,7 +34,6 @@ def build_orso_instrument(events: ReflectivityOverQ) -> OrsoInstrument: def build_orso_iofq_dataset( iofq: ReflectivityOverQ, - sigma_q: QResolution, data_source: OrsoDataSource, reduction: OrsoReduction, ) -> OrsoIofQDataset: @@ -60,11 +59,17 @@ def build_orso_iofq_dataset( qz = sc.midpoints(qz) r = sc.values(iofq.data) sr = sc.stddevs(iofq.data) - sqz = sigma_q.to(unit="1/angstrom", copy=False) + sqz = iofq.coords["Q_resolution"].to(unit="1/angstrom", copy=False) data = np.column_stack(tuple(map(_extract_values_array, (qz, r, sr, sqz)))) data = data[np.isfinite(data).all(axis=-1)] - return OrsoIofQDataset(OrsoDataset(header, data)) + ds = OrsoIofQDataset(OrsoDataset(header, data)) + ds.info.reduction.corrections = [ + "chopper ToF correction", + "footprint correction", + "supermirror calibration", + ] + return ds def _extract_values_array(var: sc.Variable) -> np.ndarray: diff --git a/src/ess/amor/resolution.py b/src/ess/amor/resolution.py index 5c395e98..143aa0fc 100644 --- a/src/ess/amor/resolution.py +++ b/src/ess/amor/resolution.py @@ -3,59 +3,39 @@ import scipp as sc from ..reflectometry.tools import fwhm_to_std -from ..reflectometry.types import ( - DetectorSpatialResolution, - FootprintCorrectedData, - QBins, - QResolution, - SampleRun, - SampleSize, -) -from .types import ( - AngularResolution, - Chopper1Position, - Chopper2Position, - SampleSizeResolution, - WavelengthResolution, -) def wavelength_resolution( - da: FootprintCorrectedData[SampleRun], - chopper_1_position: Chopper1Position[SampleRun], - chopper_2_position: Chopper2Position[SampleRun], -) -> WavelengthResolution: + L1, + L2, + chopper_separation, +): """ Find the wavelength resolution contribution as described in Section 4.3.3 of the Amor publication (doi: 10.1016/j.nima.2016.03.007). Parameters ---------- - chopper_1_position: - Position of first chopper (the one closer to the source). - chopper_2_position: - Position of second chopper (the one closer to the sample). - pixel_position: - Positions for detector pixels. + + L1: + Distance from midpoint between choppers to sample. + L2: + Distance from sample to detector. + chopper_separation: + Distance between choppers. Returns ------- : - The angular resolution variable, as standard deviation. 
+ The wavelength resolution variable, as standard deviation. """ - pixel_position = da.coords["position"] - chopper_midpoint = (chopper_1_position + chopper_2_position) * sc.scalar(0.5) - distance_between_choppers = sc.norm(chopper_2_position - chopper_1_position) - chopper_detector_distance = sc.norm(pixel_position - chopper_midpoint) - return WavelengthResolution( - fwhm_to_std(distance_between_choppers / chopper_detector_distance) - ) + return fwhm_to_std(sc.abs(chopper_separation) / (L1 + L2)) def sample_size_resolution( - da: FootprintCorrectedData[SampleRun], - sample_size: SampleSize[SampleRun], -) -> SampleSizeResolution: + L2, + sample_size, +): """ The resolution from the projected sample size, where it may be bigger than the detector pixel resolution as described in Section 4.3.3 of the Amor @@ -63,8 +43,8 @@ def sample_size_resolution( Parameters ---------- - pixel_position: - Positions for detector pixels. + L2: + Distance from sample to detector. sample_size: Size of sample. @@ -73,30 +53,24 @@ def sample_size_resolution( : Standard deviation of contribution from the sample size. """ - return fwhm_to_std( - sc.to_unit(sample_size, "m") - / sc.to_unit( - sc.norm(da.coords["position"] - da.coords["sample_position"]), - "m", - copy=False, - ) - ) + return fwhm_to_std(sample_size / L2.to(unit=sample_size.unit)) def angular_resolution( - da: FootprintCorrectedData[SampleRun], - detector_spatial_resolution: DetectorSpatialResolution[SampleRun], -) -> AngularResolution: + theta, + L2, + detector_spatial_resolution, +): """ Determine the angular resolution as described in Section 4.3.3 of the Amor publication (doi: 10.1016/j.nima.2016.03.007). Parameters ---------- - pixel_position: - Positions for detector pixels. theta: - Theta values for events. + Angle of reflection. + L2: + Distance between sample and detector. detector_spatial_resolution: FWHM of detector pixel resolution. @@ -105,67 +79,43 @@ def angular_resolution( : Angular resolution standard deviation """ - theta = da.bins.coords["theta"] return ( fwhm_to_std( - sc.to_unit( - sc.atan( - sc.to_unit(detector_spatial_resolution, "m") - / sc.to_unit( - sc.norm(da.coords["position"] - da.coords["sample_position"]), - "m", - copy=False, - ) - ), - theta.bins.unit, - copy=False, + sc.atan( + detector_spatial_resolution + / L2.to(unit=detector_spatial_resolution.unit) ) - ) + ).to(unit=theta.unit) / theta ) -def sigma_Q( - da: FootprintCorrectedData[SampleRun], - angular_resolution: AngularResolution, - wavelength_resolution: WavelengthResolution, - sample_size_resolution: SampleSizeResolution, - qbins: QBins, -) -> QResolution: +def q_resolution( + Q, + angular_resolution, + wavelength_resolution, + sample_size_resolution, +): """ - Combine all of the components of the resolution and add Q contribution. + Compute resolution in Q. Parameters ---------- + Q: + Momentum transfer. angular_resolution: Angular resolution contribution. wavelength_resolution: Wavelength resolution contribution. sample_size_resolution: Sample size resolution contribution. - q_bins: - Q-bin values. Returns ------- : - Combined resolution function. + Q resolution function. 
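+        Computed as :math:`\\sigma_Q = Q \\sqrt{\\sigma_{\\theta}^2 + \\sigma_{\\lambda}^2 + \\sigma_{s}^2}`,
+        where the three terms are the relative angular, wavelength and
+        sample-size resolution contributions, added in quadrature.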
""" - h = da.bins.concat().hist(Q=qbins) - s = ( - ( - da - * ( - angular_resolution**2 - + wavelength_resolution**2 - + sample_size_resolution**2 - ) - * da.bins.coords['Q'] ** 2 - ) - .bins.concat() - .hist(Q=qbins) + return sc.sqrt( + (angular_resolution**2 + wavelength_resolution**2 + sample_size_resolution**2) + * Q**2 ) - return sc.sqrt(sc.values(s / h)) - - -providers = (sigma_Q, angular_resolution, wavelength_resolution, sample_size_resolution) diff --git a/src/ess/amor/types.py b/src/ess/amor/types.py index c4bb3039..eade3cfc 100644 --- a/src/ess/amor/types.py +++ b/src/ess/amor/types.py @@ -9,6 +9,8 @@ AngularResolution = NewType("AngularResolution", sc.Variable) SampleSizeResolution = NewType("SampleSizeResolution", sc.Variable) +CoordTransformationGraph = NewType("CoordTransformationGraph", dict) + class ChopperFrequency(sciline.Scope[RunType, sc.Variable], sc.Variable): """Frequency of the choppers used in the run.""" @@ -18,21 +20,27 @@ class ChopperPhase(sciline.Scope[RunType, sc.Variable], sc.Variable): """Phase of the choppers in the run.""" -class Chopper1Position(sciline.Scope[RunType, sc.Variable], sc.Variable): - """Position of the first chopper relative the source of the beam.""" +class ChopperDistance(sciline.Scope[RunType, sc.Variable], sc.Variable): + """Distance from the midpoint between the two choppers to the sample.""" -class Chopper2Position(sciline.Scope[RunType, sc.Variable], sc.Variable): - """Position of the second chopper relative to the source of the beam.""" +class ChopperSeparation(sciline.Scope[RunType, sc.Variable], sc.Variable): + """Distance between the two choppers.""" class RawChopper(sciline.Scope[RunType, sc.DataGroup], sc.DataGroup): - """Chopper data loaded from nexus file""" + """Chopper data loaded from nexus file.""" class ThetaBins(sciline.Scope[RunType, sc.Variable], sc.Variable): """Binning in theta that takes into consideration that some - detector pixels have the same theta value""" + detector pixels have the same theta value.""" + + +class AngleCenterOfIncomingToHorizon( + sciline.Scope[RunType, sc.DataGroup], sc.DataGroup +): + """Angle from the center of the incoming beam to the horizon.""" WavelengthThetaFigure = NewType("WavelengthThetaFigure", Any) diff --git a/src/ess/amor/utils.py b/src/ess/amor/utils.py index aa36f944..4b08e755 100644 --- a/src/ess/amor/utils.py +++ b/src/ess/amor/utils.py @@ -13,7 +13,8 @@ def theta_grid( nu: DetectorRotation[RunType], mu: SampleRotation[RunType] ) -> ThetaBins[RunType]: - """Special grid used to create intensity maps over (theta, wavelength). + """Special grid used to create intensity maps over + (theta, wavelength). 
The grid avoids aliasing artifacts that occur if the theta bins overlap the blade edges.""" # angular offset of two blades: @@ -30,7 +31,10 @@ def theta_grid( blade_grid = blade_grid - 0.2 * stepWidth delta_grid = sc.array( - dims=["theta"], values=[], unit=blade_grid.unit, dtype=blade_grid.dtype + dims=["theta"], + values=[], + unit=blade_grid.unit, + dtype=blade_grid.dtype, ) # loop over all blades but one: for _ in range(Detector.nBlades.value - 1): @@ -53,10 +57,6 @@ def theta_grid( ).to(unit="rad") + 0.5 * Detector.nBlades * bladeAngle.to(unit="rad") ) - # TODO: If theta filtering is added, use it here - # some filtering - # theta_grid = theta_grid[theta_grid>=thetaMin] - # theta_grid = theta_grid[theta_grid<=thetaMax] return grid diff --git a/src/ess/amor/workflow.py b/src/ess/amor/workflow.py new file mode 100644 index 00000000..f1cbf997 --- /dev/null +++ b/src/ess/amor/workflow.py @@ -0,0 +1,33 @@ +from ..reflectometry.corrections import correct_by_footprint +from ..reflectometry.types import ( + BeamDivergenceLimits, + RawDetectorData, + ReducibleData, + RunType, + WavelengthBins, + YIndexLimits, + ZIndexLimits, +) +from .conversions import add_coords, add_masks +from .types import CoordTransformationGraph + + +def add_coords_masks_and_apply_corrections( + da: RawDetectorData[RunType], + ylim: YIndexLimits, + zlims: ZIndexLimits, + bdlim: BeamDivergenceLimits, + wbins: WavelengthBins, + graph: CoordTransformationGraph, +) -> ReducibleData[RunType]: + """ + Computes coordinates, masks and corrections that are + the same for the sample measurement and the reference measurement. + """ + da = add_coords(da, graph) + da = add_masks(da, ylim, zlims, bdlim, wbins) + correct_by_footprint(da) + return da + + +providers = (add_coords_masks_and_apply_corrections,) diff --git a/src/ess/reflectometry/__init__.py b/src/ess/reflectometry/__init__.py index d0ec032d..60401bb7 100644 --- a/src/ess/reflectometry/__init__.py +++ b/src/ess/reflectometry/__init__.py @@ -4,20 +4,19 @@ import importlib.metadata -from . import calibrations, conversions, corrections, normalize, orso -from .load import load_reference, save_reference - try: __version__ = importlib.metadata.version("essreflectometry") except importlib.metadata.PackageNotFoundError: __version__ = "0.0.0" + +from . import conversions, orso, normalization +from .load import load_reference, save_reference + providers = ( *conversions.providers, - *corrections.providers, - *calibrations.providers, - *normalize.providers, *orso.providers, + *normalization.providers, ) """ List of providers for setting up a Sciline pipeline. diff --git a/src/ess/reflectometry/calibrations.py b/src/ess/reflectometry/calibrations.py deleted file mode 100644 index 34f9d58e..00000000 --- a/src/ess/reflectometry/calibrations.py +++ /dev/null @@ -1,45 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2023 Scipp contributors (https://github.com/scipp) -import scipp as sc - -from . import supermirror -from .types import ReferenceIntensity - - -def calibration_factor( - da: ReferenceIntensity, - m_value: supermirror.MValue, - critical_edge: supermirror.CriticalEdge, - alpha: supermirror.Alpha, -) -> supermirror.SupermirrorReflectivityCorrection: - """ - Return the calibration factor for the supermirror. - - Parameters - ---------- - qbins: - edges of binning of Q. - m_value: - m-value for the supermirror. - critical_edge: - Supermirror critical edge. - alpha: - Supermirror alpha value. 
- - Returns - ------- - : - Calibration factor at the midpoint of each Q-bin. - """ - q = da.coords['Q'] - max_q = m_value * critical_edge - lim = (q <= critical_edge).astype(float) - lim.unit = 'one' - nq = 1.0 / (1.0 - alpha * (q - critical_edge)) - calibration_factor = sc.where( - q < max_q, lim + (1 - lim) * nq, sc.scalar(float('nan')) - ) - return calibration_factor - - -providers = (calibration_factor,) diff --git a/src/ess/reflectometry/conversions.py b/src/ess/reflectometry/conversions.py index 494699ee..8e5dca25 100644 --- a/src/ess/reflectometry/conversions.py +++ b/src/ess/reflectometry/conversions.py @@ -2,99 +2,19 @@ # Copyright (c) 2023 Scipp contributors (https://github.com/scipp) import scipp as sc from scipp.constants import pi -from scippneutron._utils import elem_dtype, elem_unit -from scippneutron.conversion.beamline import scattering_angle_in_yz_plane -from scippneutron.conversion.graph import beamline, tof - -from .types import ( - BeamDivergenceLimits, - DataWithScatteringCoordinates, - DetectorRotation, - Gravity, - IncidentBeam, - MaskedData, - ReducibleDetectorData, - RunType, - SamplePosition, - SampleRotation, - SpecularReflectionCoordTransformGraph, - WavelengthBins, - YIndexLimits, - ZIndexLimits, -) - - -def theta( - incident_beam: sc.Variable, - scattered_beam: sc.Variable, - wavelength: sc.Variable, - gravity: sc.Variable, - sample_rotation: sc.Variable, -) -> sc.Variable: - r"""Compute the scattering angle w.r.t. the sample plane. - - This function uses the definition given in - :func:`scippneutron.conversion.beamline.scattering_angle_in_yz_plane` - and includes the sample rotation :math:`\omega`: - - .. math:: - - \mathsf{tan}(\gamma) &= \frac{|y_d^{\prime}|}{z_d} \\ - \theta = \gamma - \omega - - with - - .. math:: - - y'_d = y_d + \frac{|g| m_n^2}{2 h^2} L_2^{\prime\, 2} \lambda^2 - - Attention - --------- - The above equation for :math:`y'_d` approximates :math:`L_2 \approx L'_2`. - See :func:`scippneutron.conversion.beamline.scattering_angle_in_yz_plane` - for more information. - - Parameters - ---------- - incident_beam: - Beam from source to sample. Expects ``dtype=vector3``. - scattered_beam: - Beam from sample to detector. Expects ``dtype=vector3``. - wavelength: - Wavelength of neutrons. - gravity: - Gravity vector. - sample_rotation: - The sample rotation angle :math:`\omega`. - - Returns - ------- - : - The polar scattering angle :math:`\theta`. - """ - angle = scattering_angle_in_yz_plane( - incident_beam=incident_beam, - scattered_beam=scattered_beam, - wavelength=wavelength, - gravity=gravity, - ) - angle -= sample_rotation.to(unit=elem_unit(angle)) - return angle +from scippneutron._utils import elem_dtype def reflectometry_q(wavelength: sc.Variable, theta: sc.Variable) -> sc.Variable: """ - Compute the Q vector from the theta angle computed as the difference - between gamma and omega. - Note that this is identical the 'normal' Q defined in scippneutron, except that - the `theta` angle is given as an input instead of `two_theta`. + Compute momentum transfer from reflection angle. Parameters ---------- wavelength: Wavelength values for the events. theta: - Theta values, accounting for gravity. + Angle of reflection for the events. 
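+
+    Computed as :math:`Q = \\frac{4 \\pi \\sin(\\theta)}{\\lambda}`.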
Returns ------- @@ -106,74 +26,4 @@ def reflectometry_q(wavelength: sc.Variable, theta: sc.Variable) -> sc.Variable: return c * sc.sin(theta.astype(dtype, copy=False)) / wavelength -def specular_reflection( - incident_beam: IncidentBeam[RunType], - sample_position: SamplePosition[RunType], - sample_rotation: SampleRotation[RunType], - detector_rotation: DetectorRotation[RunType], - gravity: Gravity, -) -> SpecularReflectionCoordTransformGraph[RunType]: - """ - Generate a coordinate transformation graph for specular reflection reflectometry. - - Returns - ------- - : - Specular reflectometry graph. - """ - graph = { - **beamline.beamline(scatter=True), - **tof.elastic_wavelength("tof"), - "theta": theta, - "Q": reflectometry_q, - "incident_beam": lambda: incident_beam, - "sample_position": lambda: sample_position, - "sample_rotation": lambda: sample_rotation, - "detector_rotation": lambda: detector_rotation, - "gravity": lambda: gravity, - } - return SpecularReflectionCoordTransformGraph(graph) - - -def add_coords( - da: ReducibleDetectorData[RunType], - graph: SpecularReflectionCoordTransformGraph[RunType], -) -> DataWithScatteringCoordinates[RunType]: - da = da.transform_coords( - ["theta", "wavelength", "Q", "detector_rotation"], graph=graph - ) - da.coords.set_aligned('detector_rotation', False) - da.coords["z_index"] = sc.arange( - "row", 0, da.sizes["blade"] * da.sizes["wire"], unit=None - ).fold("row", sizes={dim: da.sizes[dim] for dim in ("blade", "wire")}) - da.coords["y_index"] = sc.arange("stripe", 0, da.sizes["stripe"], unit=None) - return da - - -def add_masks( - da: DataWithScatteringCoordinates[RunType], - ylim: YIndexLimits, - wb: WavelengthBins, - zlim: ZIndexLimits, - bdlim: BeamDivergenceLimits, -) -> MaskedData[RunType]: - da.masks["beam_divergence_too_large"] = ( - da.coords["angle_from_center_of_beam"] < bdlim[0] - ) | (da.coords["angle_from_center_of_beam"] > bdlim[1]) - da.masks["y_index_range"] = (da.coords["y_index"] < ylim[0]) | ( - da.coords["y_index"] > ylim[1] - ) - da.bins.masks["wavelength_mask"] = (da.bins.coords["wavelength"] < wb[0]) | ( - da.bins.coords["wavelength"] > wb[-1] - ) - da.masks["z_index_range"] = (da.coords["z_index"] < zlim[0]) | ( - da.coords["z_index"] > zlim[1] - ) - return da - - -providers = ( - add_masks, - add_coords, - specular_reflection, -) +providers = () diff --git a/src/ess/reflectometry/corrections.py b/src/ess/reflectometry/corrections.py index 11d7486d..0cd8716a 100644 --- a/src/ess/reflectometry/corrections.py +++ b/src/ess/reflectometry/corrections.py @@ -1,81 +1,39 @@ -# SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2023 Scipp contributors (https://github.com/scipp) - import scipp as sc -from .supermirror import SupermirrorReflectivityCorrection from .tools import fwhm_to_std -from .types import ( - BeamSize, - FootprintCorrectedData, - IdealReferenceIntensity, - MaskedData, - ReferenceIntensity, - ReferenceRun, - RunType, - SampleSize, - WavelengthBins, -) -def footprint_correction( - data_array: MaskedData[RunType], - beam_size: BeamSize[RunType], - sample_size: SampleSize[RunType], -) -> FootprintCorrectedData[RunType]: +def footprint_on_sample( + theta: sc.Variable, + beam_size: sc.Variable, + sample_size: sc.Variable, +) -> sc.Variable: """ - Corrects the event weights by the fraction of the beam hitting the sample. + Computes the fraction of the beam hitting the sample. Depends on :math:`\\theta`. Parameters ---------- - data_array: - Data array to perform footprint correction on. 
+ theta: + Incidence angle relative to sample surface. beam_size: Full width half maximum of the beam. sample_size: - Size of the sample. - TODO: check what sample size actually means. Is it the sample diameter? etc. + Size of the sample, width in the beam direction. Returns ------- : - Footprint corrected data array. - """ - size_of_beam_on_sample = beam_size / sc.sin(data_array.bins.coords["theta"]) - footprint_scale = sc.erf(fwhm_to_std(sample_size / size_of_beam_on_sample)) - data_array_fp_correction = data_array / footprint_scale - return FootprintCorrectedData[RunType](data_array_fp_correction) - - -def compute_reference_intensity( - da: FootprintCorrectedData[ReferenceRun], wb: WavelengthBins -) -> ReferenceIntensity: - """Creates a reference intensity map over (z_index, wavelength). - Rationale: - The intensity expressed in those variables should not vary - with the experiment parameters (such as sample rotation). - Therefore it can be used to normalize sample measurements. + Fraction of beam hitting the sample. """ - b = da.bin(wavelength=wb, dim=set(da.dims) - set(da.coords["z_index"].dims)) - h = b.hist() - h.masks["too_few_events"] = h.data < sc.scalar(1, unit="counts") - # Add a Q coordinate to each bin, the Q is not completely unique in every bin, - # but it is close enough. - h.coords["Q"] = b.bins.coords["Q"].bins.mean() - return ReferenceIntensity(h) - - -def calibrate_reference( - da: ReferenceIntensity, cal: SupermirrorReflectivityCorrection -) -> IdealReferenceIntensity: - """Calibrates the reference intensity by the - inverse of the supermirror reflectivity""" - return IdealReferenceIntensity(da * cal) + size_of_beam_on_sample = beam_size / sc.sin(theta) + return sc.erf(fwhm_to_std(sample_size / size_of_beam_on_sample)) -providers = ( - footprint_correction, - calibrate_reference, - compute_reference_intensity, -) +def correct_by_footprint(da: sc.DataArray) -> None: + "Corrects the data by the size of the footprint on the sample." 
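+    # footprint_on_sample gives the fraction of the beam intensity that
+    # actually hits the sample at reflection angle theta; dividing by it
+    # compensates for the beam spilling over the sample edges at small angles.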
+ da /= footprint_on_sample( + da.bins.coords['theta'], + da.coords['beam_size'], + da.coords['sample_size'], + ) diff --git a/src/ess/reflectometry/load.py b/src/ess/reflectometry/load.py index eec91fc9..b95fd812 100644 --- a/src/ess/reflectometry/load.py +++ b/src/ess/reflectometry/load.py @@ -5,7 +5,7 @@ import scipp as sc import scippnexus as snx -from .types import IdealReferenceIntensity, ReferenceFilePath +from .types import ReducedReference, ReferenceFilePath def load_nx(group: snx.Group | str, *paths: str): @@ -38,9 +38,9 @@ def _unique_child_group( def save_reference(pl: sciline.Pipeline, fname: str): - pl.compute(IdealReferenceIntensity).save_hdf5(fname) + pl.compute(ReducedReference).save_hdf5(fname) return fname -def load_reference(fname: ReferenceFilePath) -> IdealReferenceIntensity: +def load_reference(fname: ReferenceFilePath) -> ReducedReference: return sc.io.hdf5.load_hdf5(fname) diff --git a/src/ess/reflectometry/normalization.py b/src/ess/reflectometry/normalization.py new file mode 100644 index 00000000..7f17dbc1 --- /dev/null +++ b/src/ess/reflectometry/normalization.py @@ -0,0 +1,91 @@ +import scipp as sc + +from .supermirror import ( + Alpha, + CriticalEdge, + MValue, + supermirror_reflectivity, +) +from .types import ( + QBins, + ReducedReference, + ReducibleData, + Reference, + ReferenceRun, + ReflectivityOverQ, + ReflectivityOverZW, + Sample, + WavelengthBins, +) + + +def reduce_reference( + reference: ReducibleData[ReferenceRun], + wavelength_bins: WavelengthBins, + critical_edge: CriticalEdge, + mvalue: MValue, + alpha: Alpha, +) -> ReducedReference: + """ + Reduces the reference measurement to estimate the + intensity distribution in the detector for + an ideal sample with reflectivity :math:`R = 1`. + """ + R = supermirror_reflectivity( + reference.bins.coords['Q'], + c=critical_edge, + m=mvalue, + alpha=alpha, + ) + reference.bins.masks['invalid'] = sc.isnan(R) + reference /= R + return reference.bins.concat(('stripe',)).hist(wavelength=wavelength_bins) + + +def reduce_sample_over_q( + sample: Sample, + reference: Reference, + qbins: QBins, +) -> ReflectivityOverQ: + """ + Computes reflectivity as the ratio of + sample intensity and intensity from a sample + with ideal reflectivity. + + Returns reflectivity as a function of :math:`Q`. + """ + h = reference.flatten(to='Q').hist(Q=qbins) + R = sample.bins.concat().bin(Q=qbins) / h.data + R.coords['Q_resolution'] = sc.sqrt( + ( + (reference * reference.coords['Q_resolution'] ** 2) + .flatten(to='Q') + .hist(Q=qbins) + ) + / h + ).data + return R + + +def reduce_sample_over_zw( + sample: Sample, + reference: Reference, + wbins: WavelengthBins, +) -> ReflectivityOverZW: + """ + Computes reflectivity as the ratio of + sample intensity and intensity from a sample + with ideal reflectivity. + + Returns reflectivity as a function of ``blade``, ``wire`` and :math:`\\lambda`. 
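+    This binned-by-pixel view is mainly used for the diagnostic figures.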
+ """ + R = sample.bins.concat(('stripe',)).bin(wavelength=wbins) / reference.data + R.masks["too_few_events"] = reference.data < sc.scalar(1, unit="counts") + return R + + +providers = ( + reduce_reference, + reduce_sample_over_q, + reduce_sample_over_zw, +) diff --git a/src/ess/reflectometry/normalize.py b/src/ess/reflectometry/normalize.py deleted file mode 100644 index 18d56283..00000000 --- a/src/ess/reflectometry/normalize.py +++ /dev/null @@ -1,149 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2023 Scipp contributors (https://github.com/scipp) -import warnings - -import scipp as sc -from scipy.optimize import OptimizeWarning - -from .types import ( - FootprintCorrectedData, - IdealReferenceIntensity, - NormalizationFactor, - QBins, - QResolution, - ReflectivityData, - ReflectivityOverQ, - SampleRun, - WavelengthBins, -) - - -def normalization_factor( - da: FootprintCorrectedData[SampleRun], - corr: IdealReferenceIntensity, - wbins: WavelengthBins, -) -> NormalizationFactor: - """The correction matrix gives us the expected intensity at each - (z_index, wavelength) bin assuming the reflectivity is one. - To normalize the sample measurement we need to integrate the total - expected intensity in every Q-bin. - Note that Q refers to the 'sample-Q', different from the 'reference-Q'. - - The 'sample-Q' is computed taking the mean of the sample measurement Q - value in every (z_index, wavelength) bin. - One complication however is that some bins have zero intensity from the - sample measurement, so we are unable to assign a 'sample-Q' value to those bins. - Therefore we estimate the intensity in the missing bins by fitting the - 'sample-q' as a function of z_index and wavelength. - - Steps: - Approximate 'sample-q' in every (z_index, wavelength) bin - Fit 'sample-q'. - Compute 'sample-q' in all bins using the fit. - Return the reference intensity with the 'sample-q' as a coordinate. - - """ - sample_q = ( - da.bin(wavelength=wbins, dim=set(da.dims) - set(da.coords["z_index"].dims)) - .bins.coords["Q"] - .bins.mean() - ) - - wm = sc.midpoints(corr.coords["wavelength"]) - - def q_of_z_wavelength(wavelength, a, b): - return a + b / wavelength - - with warnings.catch_warnings(): - # `curve_fit` raises a warning if it fails to estimate variances. - # We don't care here because we only need the parameter values and anyway - # assume that the fit worked. - # The warning can be caused by there being too few points to estimate - # uncertainties because of masks. - warnings.filterwarnings( - "ignore", - message="Covariance of the parameters could not be estimated", - category=OptimizeWarning, - ) - p, _ = sc.curve_fit( - ["wavelength"], - q_of_z_wavelength, - sc.DataArray( - data=sample_q, - coords={"wavelength": wm}, - masks={ - **corr.masks, - "_sample_q_isnan": sc.isnan(sample_q), - }, - ), - p0={"a": sc.scalar(-1e-3, unit="1/angstrom")}, - ) - return sc.DataArray( - data=corr.data, - coords={ - "Q": q_of_z_wavelength( - wm, - sc.values(p["a"]), - sc.values(p["b"]), - ).data, - }, - masks=corr.masks, - ) - - -def reflectivity_over_q( - da: FootprintCorrectedData[SampleRun], - n: NormalizationFactor, - qbins: QBins, - qres: QResolution, -) -> ReflectivityOverQ: - """ - Normalize the sample measurement by the (ideally calibrated) supermirror. 
- - Parameters - ---------- - sample: - Sample measurement with coord 'Q' - supermirror: - Supermirror measurement with coord of 'Q' representing the sample 'Q' - - Returns - ------- - : - Reflectivity as a function of Q - """ - reflectivity = da.bin(Q=qbins, dim=da.dims) / sc.values(n.hist(Q=qbins, dim=n.dims)) - reflectivity.coords['Q_resolution'] = qres.data - for coord, value in da.coords.items(): - if ( - not isinstance(value, sc.Variable) - or len(set(value.dims) - set(reflectivity.dims)) == 0 - ): - reflectivity.coords[coord] = value - return ReflectivityOverQ(reflectivity) - - -def reflectivity_per_event( - da: FootprintCorrectedData[SampleRun], - n: IdealReferenceIntensity, - wbins: WavelengthBins, -) -> ReflectivityData: - """ - Weight the sample measurement by the (ideally calibrated) supermirror. - - Returns: - reflectivity "per event" - """ - reflectivity = da.bin(wavelength=wbins, dim=set(da.dims) - set(n.dims)) / sc.values( - n - ) - for coord, value in da.coords.items(): - if ( - not isinstance(value, sc.Variable) - or len(set(value.dims) - set(reflectivity.dims)) == 0 - ): - reflectivity.coords[coord] = value - return ReflectivityData(reflectivity) - - -providers = (reflectivity_over_q, normalization_factor, reflectivity_per_event) diff --git a/src/ess/reflectometry/orso.py b/src/ess/reflectometry/orso.py index 6244f9b4..8185a6f2 100644 --- a/src/ess/reflectometry/orso.py +++ b/src/ess/reflectometry/orso.py @@ -6,32 +6,22 @@ of reference runs and only use the metadata of the sample run. """ -import graphlib import os import platform from datetime import datetime, timezone -from typing import Any, NewType +from typing import NewType from dateutil.parser import parse as parse_datetime from orsopy.fileio import base as orso_base from orsopy.fileio import data_source, orso, reduction from .load import load_nx -from .supermirror import SupermirrorReflectivityCorrection from .types import ( Filename, - FootprintCorrectedData, - ReducibleDetectorData, ReferenceRun, SampleRun, ) -try: - from sciline.task_graph import TaskGraph -except ModuleNotFoundError: - TaskGraph = Any - - OrsoCreator = NewType("OrsoCreator", orso_base.Person) """ORSO creator, that is, the person who processed the data.""" @@ -181,46 +171,6 @@ def build_orso_data_source( ) -_CORRECTIONS_BY_GRAPH_KEY = { - ReducibleDetectorData[SampleRun]: "chopper ToF correction", - FootprintCorrectedData[SampleRun]: "footprint correction", - SupermirrorReflectivityCorrection: "supermirror calibration", -} - - -def find_corrections(task_graph: TaskGraph) -> list[str]: - """Determine the list of corrections for ORSO from a task graph. - - Checks for known keys in the graph that correspond to corrections - that should be tracked in an ORSO output dataset. - Bear in mind that this exclusively checks the types used as keys in a task graph, - it cannot detect other corrections that are performed within providers - or outside the graph. - - Parameters - ---------- - : - task_graph: - The task graph used to produce output data. - - Returns - ------- - : - List of corrections in the order they are applied in. 
-    """
-    toposort = graphlib.TopologicalSorter(
-        {
-            key: tuple(provider.arg_spec.keys())
-            for key, provider in task_graph._graph.items()
-        }
-    )
-    return [
-        c
-        for key in toposort.static_order()
-        if (c := _CORRECTIONS_BY_GRAPH_KEY.get(key, None)) is not None
-    ]
-
-
 providers = (
     build_orso_data_source,
     build_orso_measurement,
diff --git a/src/ess/reflectometry/supermirror/__init__.py b/src/ess/reflectometry/supermirror/__init__.py
index b1a03792..6607face 100644
--- a/src/ess/reflectometry/supermirror/__init__.py
+++ b/src/ess/reflectometry/supermirror/__init__.py
@@ -1,4 +1,38 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright (c) 2023 Scipp contributors (https://github.com/scipp)
 # flake8: noqa: F401
-from .types import Alpha, CriticalEdge, MValue, SupermirrorReflectivityCorrection
+import scipp as sc
+import numpy as np
+
+from .types import Alpha, CriticalEdge, MValue
+
+
+def supermirror_reflectivity(
+    q: sc.Variable, c: CriticalEdge, m: MValue, alpha: Alpha
+) -> sc.Variable:
+    """
+    Returns the reflectivity of the supermirror.
+    For ``q`` outside of the region of known reflectivity
+    this function returns ``nan``.
+
+    Parameters
+    ----------
+    q:
+        Momentum transfer.
+    c:
+        Critical edge of the supermirror.
+    m:
+        m-value of the supermirror.
+    alpha:
+        Supermirror alpha value.
+
+    Returns
+    -------
+    :
+        Reflectivity of the supermirror at ``q``.
+    """
+    return sc.where(
+        q < c,
+        sc.scalar(1.0),
+        sc.where(q < m * c, sc.scalar(1) - alpha * (q - c), sc.scalar(np.nan)),
+    )
diff --git a/src/ess/reflectometry/supermirror/types.py b/src/ess/reflectometry/supermirror/types.py
index ddd4024c..b8e42239 100644
--- a/src/ess/reflectometry/supermirror/types.py
+++ b/src/ess/reflectometry/supermirror/types.py
@@ -9,5 +9,3 @@
 '''Critical edge value of the supermirror'''
 Alpha = NewType('Alpha', sc.Variable)
 ''':math:`\\alpha` value of the supermirror'''
-SupermirrorReflectivityCorrection = NewType('SupermirrorCalibrationFactor', sc.Variable)
-'''Compensates the finite reflectivity of the supermirror'''
diff --git a/src/ess/reflectometry/tools.py b/src/ess/reflectometry/tools.py
index a5a5739f..23073b8c 100644
--- a/src/ess/reflectometry/tools.py
+++ b/src/ess/reflectometry/tools.py
@@ -35,25 +35,6 @@ def fwhm_to_std(fwhm: sc.Variable) -> sc.Variable:
     return fwhm / _STD_TO_FWHM
 
 
-def std_to_fwhm(std: sc.Variable) -> sc.Variable:
-    """
-    Convert from standard deviation to full-width half maximum.
-
-    Parameters
-    ----------
-    std:
-        Standard deviation.
-
-    Returns
-    -------
-    :
-        Full-width half maximum.
-    """
-    # Enables the conversion from full width half
-    # maximum to standard deviation
-    return std * _STD_TO_FWHM
-
-
 def linlogspace(
     dim: str,
     edges: list | np.ndarray,
@@ -168,9 +149,14 @@ def _create_qgrid_where_overlapping(qgrids):
     return sc.concat(pieces, dim='Q')
 
 
+def _same_dtype(arrays):
+    return [arr.to(dtype='float64') for arr in arrays]
+
+
 def _interpolate_on_qgrid(curves, grid):
     return sc.concat(
-        [sc.lookup(c, grid.dim)[sc.midpoints(grid)] for c in curves], dim='curves'
+        _same_dtype([sc.lookup(c, grid.dim)[sc.midpoints(grid)] for c in curves]),
+        dim='curves',
     )
 
 
@@ -347,8 +333,5 @@ def orso_datasets_from_measurements(
         wf[name] = value
         wf[ReflectivityOverQ] = scale_factor * curve
         dataset = wf.compute(orso.OrsoIofQDataset)
-        dataset.info.reduction.corrections = orso.find_corrections(
-            wf.get(orso.OrsoIofQDataset)
-        )
         datasets.append(dataset)
     return datasets
diff --git a/src/ess/reflectometry/types.py b/src/ess/reflectometry/types.py
index 73c07213..dd31d465 100644
--- a/src/ess/reflectometry/types.py
+++ b/src/ess/reflectometry/types.py
@@ -20,10 +20,6 @@ class SamplePosition(sciline.Scope[RunType, sc.Variable], sc.Variable):
     """The position of the sample relative to the source(?)."""
 
 
-class IncidentBeam(sciline.Scope[RunType, sc.Variable], sc.Variable):
-    """Incident beam vector."""
-
-
 class SpecularReflectionCoordTransformGraph(sciline.Scope[RunType, dict], dict):
     """Coordinate transformation graph for specular reflection"""
 
@@ -37,40 +33,26 @@ class LoadedNeXusDetector(sciline.Scope[RunType, sc.DataGroup], sc.DataGroup):
     """NXdetector loaded from file"""
 
 
-class ReducibleDetectorData(sciline.Scope[RunType, sc.DataArray], sc.DataArray):
-    """Event time data after correcting tof, ready for reduction"""
-
-
-class DataWithScatteringCoordinates(sciline.Scope[RunType, sc.DataArray], sc.DataArray):
-    """Event data with added coordinates such as incident angle (theta),
-    wavelength, and momentum transfer (Q)"""
-
-
-class MaskedData(sciline.Scope[RunType, sc.DataArray], sc.DataArray):
-    """Event data that has been masked in wavelength and logical detector coordinates"""
-
+class ReducibleData(sciline.Scope[RunType, sc.DataArray], sc.DataArray):
+    """Event data with common coordinates (such as wavelength, theta, and Q) added"""
 
-class FootprintCorrectedData(sciline.Scope[RunType, sc.DataArray], sc.DataArray):
-    """Event data with weights corrected for the footprint of the beam
-    on the sample for the incidence angle of the event."""
 
-
-ReferenceIntensity = NewType("ReferenceIntensity", sc.DataArray)
-"""Intensity distribution of the reference measurement in (z, wavelength)"""
-
-IdealReferenceIntensity = NewType("IdealReferenceIntensity", sc.DataArray)
+ReducedReference = NewType("ReducedReference", sc.DataArray)
 """Intensity distribution on the detector for a sample with :math`R(Q) = 1`"""
 
-NormalizationFactor = NewType("NormalizationFactor", sc.DataArray)
-""":code`IdealReferenceIntensity` with added coordinate "sample"-Q"""
+Reference = NewType("Reference", sc.DataArray)
+""":code:`ReducedReference` histogrammed in sample ``Q``"""
+
+Sample = NewType("Sample", sc.DataArray)
+""":code:`Sample` measurement prepared for reduction"""
 
 ReflectivityOverQ = NewType("ReflectivityOverQ", sc.DataArray)
 """Intensity histogram over momentum transfer
 normalized by the calibrated reference measurement."""
 
-ReflectivityData = NewType("ReflectivityData", sc.DataArray)
-"""Reflectivity "per event". Event data weighted by the expected
-intensity at the coordinates of the event."""
+ReflectivityOverZW = NewType("ReflectivityOverZW", sc.DataArray)
+"""Intensity histogram over the z- and wavelength-grid,
+normalized by the calibrated reference measurement."""
 
 QResolution = NewType("QResolution", sc.Variable)
 """Resolution term for the momentum transfer for each bin of QBins."""
@@ -111,11 +93,6 @@ class SampleSize(sciline.Scope[RunType, sc.Variable], sc.Variable):
     """Size of the sample. If None it is assumed to be the same as the reference."""
 
 
-Gravity = NewType("Gravity", sc.Variable)
-"""This parameter determines if gravity is taken into account
-when computing the scattering angle and momentum transfer."""
-
-
 YIndexLimits = NewType("YIndexLimits", tuple[sc.Variable, sc.Variable])
 """Limit of the (logical) 'y' detector pixel index"""
diff --git a/src/ess/reflectometry/workflow.py b/src/ess/reflectometry/workflow.py
index c489b2cb..be07429e 100644
--- a/src/ess/reflectometry/workflow.py
+++ b/src/ess/reflectometry/workflow.py
@@ -14,7 +14,7 @@
 )
 from ess.reflectometry.types import (
     Filename,
-    FootprintCorrectedData,
+    ReducibleData,
     RunType,
     SampleRotation,
     SampleRun,
@@ -22,16 +22,9 @@
 
 
 def _concatenate_event_lists(*das):
-    return (
-        sc.reduce(das)
-        .bins.concat()
-        .assign_coords(
-            {
-                name: das[0].coords[name]
-                for name in ('position', 'sample_rotation', 'detector_rotation')
-            }
-        )
-    )
+    da = sc.reduce(das).bins.concat()
+    missing_coords = set(das[0].coords) - set(da.coords)
+    return da.assign_coords({coord: das[0].coords[coord] for coord in missing_coords})
 
 
 def _any_value(x, *_):
@@ -68,9 +61,9 @@ def with_filenames(
 
     mapped = wf.map(df)
 
-    wf[FootprintCorrectedData[runtype]] = mapped[
-        FootprintCorrectedData[runtype]
-    ].reduce(index=axis_name, func=_concatenate_event_lists)
+    wf[ReducibleData[runtype]] = mapped[ReducibleData[runtype]].reduce(
+        index=axis_name, func=_concatenate_event_lists
+    )
     wf[RawChopper[runtype]] = mapped[RawChopper[runtype]].reduce(
         index=axis_name, func=_any_value
     )
diff --git a/tests/amor/pipeline_test.py b/tests/amor/pipeline_test.py
index acdd039e..c007c35f 100644
--- a/tests/amor/pipeline_test.py
+++ b/tests/amor/pipeline_test.py
@@ -34,12 +34,11 @@ def amor_pipeline() -> sciline.Pipeline:
     pl[SampleSize[ReferenceRun]] = sc.scalar(10.0, unit="mm")
 
     pl[WavelengthBins] = sc.geomspace("wavelength", 2.8, 12, 300, unit="angstrom")
-    pl[YIndexLimits] = sc.scalar(11, unit=None), sc.scalar(41, unit=None)
-    pl[ZIndexLimits] = sc.scalar(80, unit=None), sc.scalar(370, unit=None)
+    pl[YIndexLimits] = sc.scalar(11), sc.scalar(41)
+    pl[ZIndexLimits] = sc.scalar(80), sc.scalar(370)
     pl[QBins] = sc.geomspace(
         dim="Q", start=0.005, stop=0.115, num=391, unit="1/angstrom"
     )
-    # The sample rotation value in the file is slightly off, so we set it manually
     pl[SampleRotation[ReferenceRun]] = sc.scalar(0.65, unit="deg")
     pl[Filename[ReferenceRun]] = amor.data.amor_reference_run()
@@ -56,28 +55,29 @@
 
 @pytest.mark.filterwarnings("ignore:Failed to convert .* into a transformation")
 @pytest.mark.filterwarnings("ignore:Invalid transformation, missing attribute")
-def test_run_data_pipeline(amor_pipeline: sciline.Pipeline):
+def test_has_expected_coordinates(amor_pipeline: sciline.Pipeline):
     # The sample rotation value in the file is slightly off, so we set it manually
     amor_pipeline[SampleRotation[SampleRun]] = sc.scalar(0.85, unit="deg")
     amor_pipeline[Filename[SampleRun]] = amor.data.amor_sample_run(608)
-    res = 
amor_pipeline.compute(ReflectivityOverQ) - assert "Q" in res.coords - assert "Q_resolution" in res.coords + reflectivity_over_q = amor_pipeline.compute(ReflectivityOverQ) + assert "Q" in reflectivity_over_q.coords + assert "Q_resolution" in reflectivity_over_q.coords @pytest.mark.filterwarnings("ignore:Failed to convert .* into a transformation") @pytest.mark.filterwarnings("ignore:Invalid transformation, missing attribute") -def test_run_full_pipeline(amor_pipeline: sciline.Pipeline): +def test_orso_pipeline(amor_pipeline: sciline.Pipeline): + # The sample rotation value in the file is slightly off, so we set it manually amor_pipeline[SampleRotation[SampleRun]] = sc.scalar(0.85, unit="deg") - # Make the Q range cover a larger interval than the experiment is sensitive to. - # This let's us test the non-covered regions are filtered from the ORSO data. - amor_pipeline[QBins] = sc.geomspace( - dim="Q", start=0.005, stop=0.15, num=391, unit="1/angstrom" - ) amor_pipeline[Filename[SampleRun]] = amor.data.amor_sample_run(608) res = amor_pipeline.compute(orso.OrsoIofQDataset) assert res.info.data_source.experiment.instrument == "Amor" assert res.info.reduction.software.name == "ess.reflectometry" + assert res.info.reduction.corrections == [ + "chopper ToF correction", + "footprint correction", + "supermirror calibration", + ] assert res.data.ndim == 2 assert res.data.shape[1] == 4 assert np.all(res.data[:, 1] >= 0) @@ -127,14 +127,3 @@ def test_pipeline_merging_events_result_unchanged(amor_pipeline: sciline.Pipelin assert_allclose( 2 * sc.variances(result.data), sc.variances(result2.data), rtol=sc.scalar(1e-6) ) - - -def test_find_corrections(amor_pipeline: sciline.Pipeline): - amor_pipeline[Filename[SampleRun]] = amor.data.amor_sample_run(608) - graph = amor_pipeline.get(orso.OrsoIofQDataset) - # In topological order - assert orso.find_corrections(graph) == [ - "chopper ToF correction", - "footprint correction", - "supermirror calibration", - ] diff --git a/tests/corrections_test.py b/tests/corrections_test.py index 3e7607cb..f5b4554c 100644 --- a/tests/corrections_test.py +++ b/tests/corrections_test.py @@ -1,29 +1,22 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright (c) 2023 Scipp contributors (https://github.com/scipp) import scipp as sc -from scipp import constants as cst +from scipp.constants import pi from scipp.testing import assert_allclose from ess.reflectometry import corrections -def test_footprint_correction(): - data = sc.DataArray( - data=sc.array(dims=['row'], values=[1.0], unit='counts'), - coords={ - "theta": sc.array(dims=['row'], values=[cst.pi.value / 6], unit='rad'), - "px": sc.array(dims=['row'], values=[1], unit=None), - }, - ).group('px') - out = corrections.footprint_correction( - data, +def test_footprint_on_sample(): + footprint = corrections.footprint_on_sample( + pi / 6 * sc.scalar(1, unit='rad'), beam_size=sc.scalar(5, unit='mm'), sample_size=sc.scalar(10, unit='mm'), ) - expected = sc.scalar(1, unit='counts') / sc.erf( + expected = sc.erf( 1 / (sc.scalar(2) * sc.sqrt(sc.scalar(2.0) * sc.log(sc.scalar(2.0)))) ) assert_allclose( - out.data.values[0].data[0], + footprint, expected, )
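
A minimal usage sketch of the two helpers introduced by this patch. The supermirror parameters below (critical edge, m-value, alpha) are assumed values for illustration only; in the workflow they are supplied as pipeline parameters.

    import numpy as np
    import scipp as sc

    from ess.reflectometry import corrections
    from ess.reflectometry.supermirror import supermirror_reflectivity

    # Ideal supermirror reflectivity: 1 below the critical edge c, linear
    # decay up to m * c, and NaN where the mirror reflectivity is unknown.
    q = sc.linspace('Q', 0.0, 0.12, num=7, unit='1/angstrom')
    r = supermirror_reflectivity(
        q,
        c=sc.scalar(0.022, unit='1/angstrom'),  # assumed critical edge
        m=sc.scalar(2.5),  # assumed m-value
        alpha=sc.scalar(4.0, unit='angstrom'),  # assumed slope above the edge
    )

    # Fraction of the beam that hits the sample at incidence angle theta;
    # event weights are divided by this factor in the footprint correction.
    footprint = corrections.footprint_on_sample(
        sc.scalar(np.pi / 6, unit='rad'),
        beam_size=sc.scalar(5, unit='mm'),
        sample_size=sc.scalar(10, unit='mm'),
    )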