Try sharing common parts of workflow compute to make tests run faster
This demonstrates an approach that could save some test runtime. In
this example a parametrized test runs a nearly identical workflow
8 times, each time loading data from scratch.

The change here moves some of the common parts into a module-scope
fixture. On my machine this reduces the test runtime from 50 seconds to
22 seconds.

I do not know if this is a great solution. If we want to go with this, a
helper utility could be made: given a sciline.Pipeline and a set of
keys, compute all intermediate results that do not depend on those keys
and set the results as "static" data in the workflow. This would avoid
error-prone manual authoring of fixtures like the one I added here.
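
As a rough illustration of that suggested helper (not part of this commit): a
minimal sketch under the assumption that the caller still lists the keys to
pin explicitly. The name set_static_data is hypothetical, and the automatic
"does not depend on those keys" analysis would additionally need access to
sciline's task graph, which is not shown here. Only API already used in the
diff below (copy, compute, item assignment) is relied on.

from typing import Iterable

import sciline


def set_static_data(
    pipeline: sciline.Pipeline, keys: Iterable[type]
) -> sciline.Pipeline:
    """Return a copy of ``pipeline`` with the given intermediate keys pinned.

    Computing all keys in a single call lets shared providers run only once;
    the results are then written back into the copy as parameters, so later
    compute() calls reuse them instead of recomputing.
    """
    pipeline = pipeline.copy()
    for key, value in pipeline.compute(tuple(keys)).items():
        pipeline[key] = value
    return pipeline

A module-scoped fixture like the one added in this commit could then be
reduced to a single call, e.g.
set_static_data(pipeline, (MaskedData[SampleRun], MaskedSolidAngle[SampleRun])).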
SimonHeybrock committed Oct 30, 2024
1 parent 947b649 commit b156696
Showing 1 changed file with 27 additions and 3 deletions.
30 changes: 27 additions & 3 deletions tests/loki/iofq_test.py
@@ -13,6 +13,7 @@
 from ess import loki, sans
 from ess.sans.conversions import ElasticCoordTransformGraph
 from ess.sans.types import (
+    BackgroundRun,
     BackgroundSubtractedIofQ,
     BackgroundSubtractedIofQxy,
     BeamCenter,
@@ -21,15 +22,20 @@
     CorrectForGravity,
     Denominator,
     DimsToKeep,
+    Incident,
     IofQ,
     IofQxy,
     MaskedData,
+    MaskedSolidAngle,
     Numerator,
     QBins,
     QxBins,
     QyBins,
     ReturnEvents,
     SampleRun,
+    TofMonitor,
+    Transmission,
+    TransmissionRun,
     UncertaintyBroadcastMode,
     WavelengthBands,
     WavelengthBins,
@@ -85,6 +91,23 @@ def test_pipeline_can_compute_IofQ(uncertainties, qxy: bool):
     assert_identical(result, reference)
 
 
+@pytest.fixture(scope='module')
+def workflow_with_data():
+    pipeline = make_workflow()
+    pipeline[BeamCenter] = sans.beam_center_from_center_of_mass(pipeline)
+    for run in (SampleRun, BackgroundRun):
+        keys = (
+            MaskedData[run],
+            MaskedSolidAngle[run],
+            TofMonitor[run, Incident],
+            TofMonitor[TransmissionRun[run], Incident],
+            TofMonitor[TransmissionRun[run], Transmission],
+        )
+        for key, value in pipeline.compute(keys).items():
+            pipeline[key] = value
+    return pipeline
+
+
 @pytest.mark.parametrize(
     'uncertainties',
     [UncertaintyBroadcastMode.drop, UncertaintyBroadcastMode.upper_bound],
@@ -98,10 +121,11 @@ def test_pipeline_can_compute_IofQ(uncertainties, qxy: bool):
         BackgroundSubtractedIofQxy,
     ],
 )
-def test_pipeline_can_compute_IofQ_in_event_mode(uncertainties, target):
-    pipeline = make_workflow()
+def test_pipeline_can_compute_IofQ_in_event_mode(
+    uncertainties, target, workflow_with_data
+):
+    pipeline = workflow_with_data.copy()
     pipeline[UncertaintyBroadcastMode] = uncertainties
-    pipeline[BeamCenter] = sans.beam_center_from_center_of_mass(pipeline)
     reference = pipeline.compute(target)
     pipeline[ReturnEvents] = True
     result = pipeline.compute(target)
