diff --git a/doc/changes/devel/12966.newfeature.rst b/doc/changes/devel/12966.newfeature.rst
new file mode 100644
index 00000000000..dff334d9b0a
--- /dev/null
+++ b/doc/changes/devel/12966.newfeature.rst
@@ -0,0 +1 @@
+Add ability to use :func:`mne.preprocessing.compute_fine_calibration` with non-Neuromag-style systems, as well as options to control the bad-angle and error tolerances, by `Eric Larson`_.
diff --git a/mne/preprocessing/_fine_cal.py b/mne/preprocessing/_fine_cal.py
index a1b9f32c098..41d20539ce0 100644
--- a/mne/preprocessing/_fine_cal.py
+++ b/mne/preprocessing/_fine_cal.py
@@ -6,7 +6,7 @@
 from functools import partial

 import numpy as np
-from scipy.optimize import fmin_cobyla
+from scipy.optimize import minimize

 from .._fiff.pick import pick_info, pick_types
 from .._fiff.tag import _coil_trans_to_loc, _loc_to_coil_trans
@@ -16,6 +16,7 @@
 from ..utils import (
     _check_fname,
     _check_option,
+    _clean_names,
     _ensure_int,
     _pl,
     _reg_pinv,
@@ -43,6 +44,9 @@ def compute_fine_calibration(
     origin=(0.0, 0.0, 0.0),
     cross_talk=None,
     calibration=None,
+    *,
+    angle_limit=5.0,
+    err_limit=5.0,
     verbose=None,
 ):
     """Compute fine calibration from empty-room data.
@@ -68,6 +72,17 @@ def compute_fine_calibration(
         Dictionary with existing calibration. If provided, the magnetometer
         imbalances and adjusted normals will be used and only the gradiometer
         imbalances will be estimated (see step 2 in Notes below).
+    angle_limit : float
+        The maximum permitted angle in degrees between the original and adjusted
+        magnetometer normals. If the angle is exceeded, the segment is treated as
+        an outlier and discarded.
+
+        .. versionadded:: 1.9
+    err_limit : float
+        The maximum error (in percent) for each channel in order for a segment to
+        be used.
+
+        .. versionadded:: 1.9
     %(verbose)s

     Returns
@@ -114,6 +129,12 @@ def compute_fine_calibration(
     ext_order = _ensure_int(ext_order, "ext_order")
     origin = _check_origin(origin, raw.info, "meg", disp=True)
     _check_option("raw.info['bads']", raw.info["bads"], ([],))
+    _validate_type(err_limit, "numeric", "err_limit")
+    _validate_type(angle_limit, "numeric", "angle_limit")
+    for key, val in dict(err_limit=err_limit, angle_limit=angle_limit).items():
+        if val < 0:
+            raise ValueError(f"{key} must be greater than or equal to 0, got {val}")
+    # Fine cal should not include ref channels
     picks = pick_types(raw.info, meg=True, ref_meg=False)
     if raw.info["dev_head_t"] is not None:
         raise ValueError(
@@ -144,7 +165,7 @@ def compute_fine_calibration(
     locs = np.array([ch["loc"] for ch in info["chs"]])
     zs = locs[mag_picks, -3:].copy()
     if calibration is not None:
-        _, calibration, _ = _prep_fine_cal(info, calibration)
+        _, calibration, _ = _prep_fine_cal(info, calibration, ignore_ref=True)
         for pi, pick in enumerate(mag_picks):
             idx = calibration["ch_names"].index(info["ch_names"][pick])
             cals[pick] = calibration["imb_cals"][idx].item()
@@ -164,7 +185,14 @@ def compute_fine_calibration(
         data = raw[picks, start:stop][0]
         if ctc is not None:
             data = ctc.dot(data)
-        z, cal, good = _adjust_mag_normals(info, data, origin, ext_order)
+        z, cal, good = _adjust_mag_normals(
+            info,
+            data,
+            origin,
+            ext_order,
+            angle_limit=angle_limit,
+            err_limit=err_limit,
+        )
         if good:
             z_list.append(z)
             cal_list.append(cal)
@@ -213,7 +241,8 @@ def compute_fine_calibration(
         cals[ii : ii + 1] if ii in mag_picks else imb[ii]
         for ii in range(len(info["ch_names"]))
     ]
-    calibration = dict(ch_names=info["ch_names"], locs=locs, imb_cals=imb_cals)
+    ch_names = _clean_names(info["ch_names"], remove_whitespace=True)
+    calibration = dict(ch_names=ch_names, locs=locs, imb_cals=imb_cals)
     return calibration, count
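For orientation, a minimal usage sketch of the keyword arguments added above, assuming MNE-Python 1.9+ and an empty-room recording in a hypothetical file erm_raw.fif (fine calibration requires dev_head_t to be unset, as the check in the hunk above enforces); the argument names and the (calibration, count) return value follow the signature in this patch:

    import mne

    # hypothetical empty-room recording
    raw = mne.io.read_raw_fif("erm_raw.fif").load_data()

    # loosen the per-segment rejection thresholds (both default to 5)
    calibration, count = mne.preprocessing.compute_fine_calibration(
        raw, angle_limit=10.0, err_limit=10.0
    )
    print(f"{count} segment(s) kept")

    # use the estimated calibration during Maxwell filtering
    raw_sss = mne.preprocessing.maxwell_filter(
        raw, calibration=calibration, coord_frame="meg"
    )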
@@ -252,7 +281,7 @@ def _vector_angle(x, y):
     )


-def _adjust_mag_normals(info, data, origin, ext_order):
+def _adjust_mag_normals(info, data, origin, ext_order, *, angle_limit, err_limit):
     """Adjust coil normals using magnetometers and empty-room data."""
     # in principle we could allow using just mag or mag+grad, but MF uses
     # just mag so let's follow suit
@@ -317,9 +346,16 @@ def _adjust_mag_normals(info, data, origin, ext_order):
         # Figure out the additive term for z-component
-        zs[cal_idx] = fmin_cobyla(
-            objective, old_z, cons=(), rhobeg=1e-3, rhoend=1e-4, disp=False
-        )
+        zs[cal_idx] = minimize(
+            objective,
+            old_z,
+            bounds=[(-2, 2)] * 3,
+            # BFGS is the default for minimize but COBYLA converges faster
+            method="COBYLA",
+            # Start with a small relative step because nominal geometry information
+            # should be fairly accurate to begin with
+            options=dict(rhobeg=1e-1),
+        ).x

         # Do in-place adjustment to all_coils
         cals[cal_idx] = 1.0 / np.linalg.norm(zs[cal_idx])
@@ -347,12 +383,15 @@ def _adjust_mag_normals(info, data, origin, ext_order):
     # Chunk is usable if all angles and errors are both small
     reason = list()
     max_angle = np.max(angles)
-    if max_angle >= 5.0:
-        reason.append(f"max angle {max_angle:0.2f} >= 5°")
+    if max_angle >= angle_limit:
+        reason.append(f"max angle {max_angle:0.2f} >= {angle_limit:0.1f}°")
     each_err = _data_err(data, S_tot, cals, axis=-1)[picks_mag]
-    n_bad = (each_err > 5.0).sum()
+    n_bad = (each_err > err_limit).sum()
     if n_bad:
-        reason.append(f"{n_bad} residual{_pl(n_bad)} > 5%")
+        reason.append(
+            f"{n_bad} residual{_pl(n_bad)} > {err_limit:0.1f}% "
+            f"(max: {each_err.max():0.2f}%)"
+        )
     reason = ", ".join(reason)
     if reason:
         reason = f" ({reason})"
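The optimizer change above in _adjust_mag_normals swaps fmin_cobyla for the scipy.optimize.minimize interface; the following toy sketch only illustrates that calling pattern (the quadratic objective and target vector are made up), and passing bounds to COBYLA through minimize assumes a reasonably recent SciPy, as the patched code does:

    import numpy as np
    from scipy.optimize import minimize

    def objective(z):
        # made-up stand-in for the real misfit of an adjusted magnetometer normal
        return np.sum((z - np.array([0.1, -0.2, 1.0])) ** 2)

    old_z = np.array([0.0, 0.0, 1.0])  # start from the nominal normal
    res = minimize(
        objective,
        old_z,
        bounds=[(-2, 2)] * 3,
        method="COBYLA",            # chosen in the patch for faster convergence
        options=dict(rhobeg=1e-1),  # small first step: geometry is nearly right
    )
    new_z = res.x  # minimize returns an OptimizeResult; the patch uses its .x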
diff --git a/mne/preprocessing/maxwell.py b/mne/preprocessing/maxwell.py
index afd3c72b5fb..d524094968c 100644
--- a/mne/preprocessing/maxwell.py
+++ b/mne/preprocessing/maxwell.py
@@ -2,7 +2,7 @@
 # License: BSD-3-Clause
 # Copyright the MNE-Python contributors.

-from collections import Counter, OrderedDict
+from collections import Counter
 from functools import partial
 from math import factorial
 from os import path as op
@@ -2070,7 +2070,7 @@ def _overlap_projector(data_int, data_res, corr):
     return V_principal


-def _prep_fine_cal(info, fine_cal):
+def _prep_fine_cal(info, fine_cal, *, ignore_ref):
     from ._fine_cal import read_fine_calibration

     _validate_type(fine_cal, (dict, "path-like"))
@@ -2081,17 +2081,18 @@ def _prep_fine_cal(info, fine_cal):
         extra = "dict"
     logger.info(f" Using fine calibration {extra}")
     ch_names = _clean_names(info["ch_names"], remove_whitespace=True)
-    info_to_cal = OrderedDict()
+    info_to_cal = dict()
     missing = list()
-    for ci, name in enumerate(fine_cal["ch_names"]):
-        if name not in ch_names:
+    names_clean = _clean_names(fine_cal["ch_names"], remove_whitespace=True)
+    for ci, (name, name_clean) in enumerate(zip(fine_cal["ch_names"], names_clean)):
+        if name_clean not in ch_names:
             missing.append(name)
         else:
-            oi = ch_names.index(name)
+            oi = ch_names.index(name_clean)
             info_to_cal[oi] = ci
-    meg_picks = pick_types(info, meg=True, exclude=[])
+    meg_picks = pick_types(info, meg=True, exclude=[], ref_meg=not ignore_ref)
     if len(info_to_cal) != len(meg_picks):
-        bad = sorted({ch_names[pick] for pick in meg_picks} - set(fine_cal["ch_names"]))
+        bad = sorted({ch_names[pick] for pick in meg_picks} - set(names_clean))
         raise RuntimeError(
             f"Not all MEG channels found in fine calibration file, missing:\n{bad}"
         )
@@ -2102,9 +2103,9 @@ def _prep_fine_cal(info, fine_cal):


 def _update_sensor_geometry(info, fine_cal, ignore_ref):
     """Replace sensor geometry information and reorder cal_chs."""
-    info_to_cal, fine_cal, ch_names = _prep_fine_cal(info, fine_cal)
-    grad_picks = pick_types(info, meg="grad", exclude=())
-    mag_picks = pick_types(info, meg="mag", exclude=())
+    info_to_cal, fine_cal, _ = _prep_fine_cal(info, fine_cal, ignore_ref=ignore_ref)
+    grad_picks = pick_types(info, meg="grad", exclude=(), ref_meg=not ignore_ref)
+    mag_picks = pick_types(info, meg="mag", exclude=(), ref_meg=not ignore_ref)

     # Determine gradiometer imbalances and magnetometer calibrations
     grad_imbalances = np.array(
@@ -2134,7 +2135,11 @@ def _update_sensor_geometry(info, fine_cal, ignore_ref):
         assert not used[oi]
         used[oi] = True
         info_ch = info["chs"][oi]
-        ch_num = int(fine_cal["ch_names"][ci].lstrip("MEG").lstrip("0"))
+        # This only works for VV-like names
+        try:
+            ch_num = int(fine_cal["ch_names"][ci].lstrip("MEG").lstrip("0"))
+        except ValueError:  # invalid literal for int() with base 10
+            ch_num = oi
         cal_chans.append([ch_num, info_ch["coil_type"]])

         # Some .dat files might only rotate EZ, so we must check first that
@@ -2174,7 +2179,7 @@ def _update_sensor_geometry(info, fine_cal, ignore_ref):
             # Channel positions are not changed
             info_ch["loc"][3:] = cal_loc[3:]
         assert info_ch["coord_frame"] == FIFF.FIFFV_COORD_DEVICE
-    meg_picks = pick_types(info, meg=True, exclude=())
+    meg_picks = pick_types(info, meg=True, exclude=(), ref_meg=not ignore_ref)
     assert used[meg_picks].all()
     assert not used[np.setdiff1d(np.arange(len(used)), meg_picks)].any()
     # This gets written to the Info struct
@@ -2186,7 +2191,7 @@ def _update_sensor_geometry(info, fine_cal, ignore_ref):
     np.clip(ang_shift, -1.0, 1.0, ang_shift)
     np.rad2deg(np.arccos(ang_shift), ang_shift)  # Convert to degrees
     logger.info(
-        " Adjusted coil positions by (μ ± σ): "
+        " Adjusted coil orientations by (μ ± σ): "
         f"{np.mean(ang_shift):0.1f}° ± {np.std(ang_shift):0.1f}° "
        f"(max: {np.max(np.abs(ang_shift)):0.1f}°)"
     )
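The _prep_fine_cal changes above match channels by whitespace-stripped names so that calibration files and Info objects with differently formatted names (e.g. "MEG 0112" vs. "MEG0112") still line up. A standalone sketch of that matching idea follows; it uses plain string cleaning and hypothetical helper names rather than the private _clean_names utility, which also handles other vendor-specific naming quirks:

    def strip_ws(names):
        """Drop whitespace so 'MEG 0112' and 'MEG0112' compare equal."""
        return [name.replace(" ", "") for name in names]

    def match_cal_channels(info_names, cal_names):
        """Map info channel index -> calibration entry index, as _prep_fine_cal does."""
        info_clean = strip_ws(info_names)
        cal_clean = strip_ws(cal_names)
        info_to_cal, missing = {}, []
        for ci, name_clean in enumerate(cal_clean):
            if name_clean in info_clean:
                info_to_cal[info_clean.index(name_clean)] = ci
            else:
                missing.append(cal_names[ci])
        return info_to_cal, missing

    # an older file that still contains spaces matches a space-free Info
    print(match_cal_channels(["MEG0113", "MEG0112"], ["MEG 0112"]))  # ({1: 0}, [])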
diff --git a/mne/preprocessing/tests/test_fine_cal.py b/mne/preprocessing/tests/test_fine_cal.py
index 247998ba31c..b25b824bae0 100644
--- a/mne/preprocessing/tests/test_fine_cal.py
+++ b/mne/preprocessing/tests/test_fine_cal.py
@@ -2,14 +2,16 @@
 # License: BSD-3-Clause
 # Copyright the MNE-Python contributors.

+from pathlib import Path
+
 import numpy as np
 import pytest
-from numpy.testing import assert_allclose
+from numpy.testing import assert_allclose, assert_array_less

 from mne import pick_types
 from mne._fiff.tag import _loc_to_coil_trans
 from mne.datasets import testing
-from mne.io import read_raw_fif
+from mne.io import read_raw_ctf, read_raw_fif, read_raw_fil, read_raw_kit
 from mne.preprocessing import (
     compute_fine_calibration,
     maxwell_filter,
@@ -27,6 +29,18 @@
 erm_fname = data_path / "SSS" / "141027_cropped_90Hz_raw.fif"
 ctc = data_path / "SSS" / "ct_sparse.fif"
 cal_mf_fname = data_path / "SSS" / "141027.dat"
+triux_path = data_path / "SSS" / "TRIUX"
+tri_fname = triux_path / "triux_bmlhus_erm_raw.fif"
+tri_cal_fname = triux_path / "sss_cal_BMLHUS.dat"
+ctf_fname_continuous = data_path / "CTF" / "testdata_ctf.ds"
+io_dir = Path(__file__).parents[2] / "io"
+kit_dir = io_dir / "kit" / "tests" / "data"
+sqd_path = kit_dir / "test.sqd"
+mrk_path = kit_dir / "test_mrk.sqd"
+elp_path = kit_dir / "test_elp.txt"
+hsp_path = kit_dir / "test_hsp.txt"
+fil_fname = data_path / "FIL" / "sub-noise_ses-001_task-noise220622_run-001_meg.bin"
+td_mark = testing._pytest_mark()


 @pytest.mark.parametrize("fname", (cal_mf_fname, fine_cal_fname, fine_cal_fname_3d))
@@ -45,14 +59,52 @@ def test_fine_cal_io(tmp_path, fname):
     assert object_diff(fine_cal_dict, fine_cal_dict_reload) == ""


-@pytest.mark.slowtest
 @testing.requires_testing_data
-def test_compute_fine_cal():
+@pytest.mark.parametrize(
+    "kind",
+    [
+        pytest.param("VectorView", marks=pytest.mark.ultraslowtest),  # ~7s
+        pytest.param("TRIUX", marks=pytest.mark.ultraslowtest),  # ~14s
+    ],
+)
+def test_compute_fine_cal(kind):
     """Test computing fine calibration coefficients."""
-    raw = read_raw_fif(erm_fname)
-    want_cal = read_fine_calibration(cal_mf_fname)
+    cl = dict(mag=(0.99, 1.01), grad=(0.99, 1.01))
+    if kind == "VectorView":
+        erm = erm_fname
+        cal = cal_mf_fname
+        err_limit = 5
+        angle_limit = 5
+        gwoma = [66, 68]
+        ggoma = [55, 150]
+        ggwma = [62, 86]
+        sfs = [26, 27, 61, 63, 61, 63, 68, 70]
+        cl3 = [0.6, 0.7]
+    else:
+        assert kind == "TRIUX"
+        erm = tri_fname
+        cal = tri_cal_fname
+        err_limit = 10
+        angle_limit = 10
+        cl["grad"] = (0.0, 0.1)
+        gwoma = [48, 52]
+        ggoma = [13, 67]
+        ggwma = [13, 120]
+        sfs = [34, 35, 27, 28, 50, 53, 75, 79]  # ours is better!
+        cl3 = [-0.3, -0.1]
+    raw = read_raw_fif(erm)
+    want_cal = read_fine_calibration(cal)
+    with pytest.raises(ValueError, match="err_limit.*greater.*0"):
+        compute_fine_calibration(raw, err_limit=-1)
+    with pytest.raises(ValueError, match="angle_limit.*greater.*0"):
+        compute_fine_calibration(raw, angle_limit=-1)
     got_cal, counts = compute_fine_calibration(
-        raw, cross_talk=ctc, n_imbalance=1, verbose="debug"
+        raw,
+        cross_talk=ctc,
+        n_imbalance=1,
+        err_limit=err_limit,
+        angle_limit=angle_limit,
+        verbose=True,
     )
     assert counts == 1
     assert set(got_cal.keys()) == set(want_cal.keys())
@@ -66,7 +118,8 @@ def test_compute_fine_cal():
     assert got_imb.shape == want_imb.shape == (306, 1)
     got_imb, want_imb = got_imb[:, 0], want_imb[:, 0]

-    orig_locs = np.array([ch["loc"] for ch in raw.info["chs"][:306]])
+    meg_picks = pick_types(raw.info, meg=True, ref_meg=False, exclude=())
+    orig_locs = np.array([raw.info["chs"][pick]["loc"] for pick in meg_picks])
     want_locs = want_cal["locs"]
     got_locs = got_cal["locs"]
     assert want_locs.shape == got_locs.shape
@@ -75,46 +128,55 @@ def test_compute_fine_cal():
     want_trans = _loc_to_coil_trans(want_locs)
     got_trans = _loc_to_coil_trans(got_locs)
     want_orig_angles, want_orig_dist = _angle_dist_between_rigid(
-        want_trans, orig_trans, angle_units="deg"
+        want_trans,
+        orig_trans,
+        angle_units="deg",
+        distance_units="mm",
     )
     got_want_angles, got_want_dist = _angle_dist_between_rigid(
-        got_trans, want_trans, angle_units="deg"
+        got_trans,
+        want_trans,
+        angle_units="deg",
+        distance_units="mm",
     )
     got_orig_angles, got_orig_dist = _angle_dist_between_rigid(
-        got_trans, orig_trans, angle_units="deg"
+        got_trans,
+        orig_trans,
+        angle_units="deg",
+        distance_units="mm",
     )
-    assert_allclose(got_want_dist, 0.0, atol=1e-6)
-    assert_allclose(got_orig_dist, 0.0, atol=1e-6)
+    assert_array_less(got_want_dist, 0.01)
+    assert_array_less(got_orig_dist, 0.01)
     for key in ("mag", "grad"):
         # imb_cals value
-        p = pick_types(raw.info, meg=key, exclude=())
+        p = np.searchsorted(meg_picks, pick_types(raw.info, meg=key, exclude=()))
         r2 = np.dot(got_imb[p], want_imb[p]) / (
             np.linalg.norm(want_imb[p]) * np.linalg.norm(got_imb[p])
         )
-        assert 0.99 < r2 <= 1.00001, f"{key}: {r2:0.3f}"
+        assert cl[key][0] < r2 <= cl[key][1], f"{key}: {r2:0.3f}"
         # rotation angles
         want_orig_max_angle = want_orig_angles[p].max()
         got_orig_max_angle = got_orig_angles[p].max()
         got_want_max_angle = got_want_angles[p].max()
         if key == "mag":
             assert 8 < want_orig_max_angle < 11, want_orig_max_angle
-            assert 1 < got_orig_max_angle < 2, got_orig_max_angle
-            assert 9 < got_want_max_angle < 11, got_want_max_angle
+            assert 1 < got_orig_max_angle < 8, got_orig_max_angle
+            assert 8 < got_want_max_angle < 11, got_want_max_angle
         else:
             # Some of these angles are large, but mostly this has to do with
             # processing a very short (one 10-s segment), downsampled (90 Hz)
             # file
-            assert 66 < want_orig_max_angle < 68, want_orig_max_angle
-            assert 56 < got_orig_max_angle < 113, got_orig_max_angle
-            assert 53 < got_want_max_angle < 60, got_want_max_angle
+            assert gwoma[0] < want_orig_max_angle < gwoma[1]
+            assert ggoma[0] < got_orig_max_angle < ggoma[1]
+            assert ggwma[0] < got_want_max_angle < ggwma[1]

     kwargs = dict(bad_condition="warning", cross_talk=ctc, coord_frame="meg")
     raw_sss = maxwell_filter(raw, **kwargs)
     raw_sss_mf = maxwell_filter(raw, calibration=cal_mf_fname, **kwargs)
     raw_sss_py = maxwell_filter(raw, calibration=got_cal, **kwargs)
-    _assert_shielding(raw_sss, raw, 26, 27)
-    _assert_shielding(raw_sss_mf, raw, 61, 63)
-    _assert_shielding(raw_sss_py, raw, 61, 63)
+    _assert_shielding(raw_sss, raw, *sfs[0:2])
+    _assert_shielding(raw_sss_mf, raw, *sfs[2:4])
+    _assert_shielding(raw_sss_py, raw, *sfs[4:6])

     # redoing with given mag data should yield same result
     got_cal_redo, _ = compute_fine_calibration(
@@ -126,16 +188,104 @@ def test_compute_fine_cal():
     assert sum([(ic == 1.0).any() for ic in got_cal["imb_cals"]]) == 0

     # redoing with 3 imlabance parameters should improve the shielding factor
-    grad_picks = pick_types(raw.info, meg="grad")
-    assert len(grad_picks) == 204 and grad_picks[0] == 0
-    got_grad_imbs = np.array([got_cal["imb_cals"][pick] for pick in grad_picks])
+    grad_subpicks = np.searchsorted(meg_picks, pick_types(raw.info, meg="grad"))
+    assert len(grad_subpicks) == 204 and grad_subpicks[0] in (0, 1)
+    got_grad_imbs = np.array([got_cal["imb_cals"][pick] for pick in grad_subpicks])
     assert got_grad_imbs.shape == (204, 1)
     got_cal_3, _ = compute_fine_calibration(
         raw, cross_talk=ctc, n_imbalance=3, calibration=got_cal, verbose="debug"
     )
-    got_grad_3_imbs = np.array([got_cal_3["imb_cals"][pick] for pick in grad_picks])
+    got_grad_3_imbs = np.array([got_cal_3["imb_cals"][pick] for pick in grad_subpicks])
     assert got_grad_3_imbs.shape == (204, 3)
     corr = np.corrcoef(got_grad_3_imbs[:, 0], got_grad_imbs[:, 0])[0, 1]
-    assert 0.6 < corr < 0.7
+    assert cl3[0] < corr < cl3[1]
     raw_sss_py = maxwell_filter(raw, calibration=got_cal_3, **kwargs)
-    _assert_shielding(raw_sss_py, raw, 68, 70)
+    _assert_shielding(raw_sss_py, raw, *sfs[6:8])
+
+
+@pytest.mark.parametrize(
+    "system",
+    [
+        pytest.param("kit", marks=[pytest.mark.ultraslowtest]),  # ~6s
+        pytest.param("ctf", marks=[td_mark, pytest.mark.ultraslowtest]),  # ~13s
+        pytest.param("fil", marks=[td_mark]),  # ~3s
+        pytest.param("triux", marks=[td_mark, pytest.mark.slowtest]),  # ~7s
+    ],
+)
+def test_fine_cal_systems(system, tmp_path):
+    """Test fine calibration with different systems."""
+    int_order = 8
+    n_ref = 0
+    if system == "kit":
+        raw = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path)
+        angle_limit = 170
+        err_limit = 500
+        n_ref = 3
+        corrs = (0.58, 0.61, 0.57)
+        sfs = [0.9, 1.1, 2.3, 2.8]
+        corr_tol = 0.3
+    elif system == "ctf":
+        raw = read_raw_ctf(ctf_fname_continuous).crop(0, 1)
+        raw.apply_gradient_compensation(0)
+        angle_limit = 170
+        err_limit = 6000
+        n_ref = 28
+        corrs = (0.19, 0.41, 0.49)
+        sfs = [0.5, 0.7, 0.9, 1.5]
+        corr_tol = 0.45
+    elif system == "fil":
+        raw = read_raw_fil(fil_fname, verbose="error")
+        raw.info["bads"] = [f"G2-{a}-{b}" for a in ("MW", "DS", "DT") for b in "YZ"]
+        raw.pick("mag", exclude="bads")  # no sensor positions
+        raw.crop(1, 2)
+        angle_limit = 55
+        err_limit = 15
+        int_order = 5
+        corrs = (0.13, 0.0, 0.12)
+        sfs = [4, 5, 125, 145]
+        corr_tol = 0.15
+    else:
+        assert system == "triux", f"Unknown system {system}"
+        raw = read_raw_fif(tri_fname)
+        angle_limit = 7
+        err_limit = 10
+        corrs = (-0.13, 0.01, 0.11)
+        sfs = [26, 28, 100, 110]
+        corr_tol = 0.05
+    raw.info["dev_head_t"] = None  # fake empty-room even if it's not
+    # avoid line noise and speed up computation
+    raw.load_data().resample(50, method="polyphase")
+    fc, n = compute_fine_calibration(
+        raw,
+        angle_limit=angle_limit,
+        err_limit=err_limit,
+        verbose=True,
+    )
+    assert n == 1
+    # ensure ref sensors not in fine calibration
+    ref_picks = pick_types(raw.info, meg=False, ref_meg=True)
+    assert len(ref_picks) == n_ref
+    for pick in ref_picks:
+        assert raw.info["ch_names"][pick] not in fc["ch_names"]
+    # write it, read it back, ensure it can be applied
+    fname = tmp_path / "fc.dat"
+    write_fine_calibration(fname, fc)
+    fc_in = read_fine_calibration(fname)
+    kwargs = dict(
+        coord_frame="meg",
+        origin=(0.0, 0.0, 0.0),
+        ignore_ref=True,
+        regularize=None,
+        bad_condition="ignore",
+        int_order=int_order,
+    )
+    raw_sss = maxwell_filter(raw, **kwargs)
+    _assert_shielding(raw_sss, raw, *sfs[0:2])
+    raw_sss_cal = maxwell_filter(raw, calibration=fc_in, **kwargs)
+    _assert_shielding(raw_sss_cal, raw, *sfs[2:4])
+    raw_data = raw.get_data("mag").ravel()
+    raw_sss_data = raw_sss.get_data("mag").ravel()
+    raw_sss_cal_data = raw_sss_cal.get_data("mag").ravel()
+    got_corrs = np.corrcoef([raw_data, raw_sss_data, raw_sss_cal_data])
+    got_corrs = got_corrs[np.triu_indices(3, 1)]
+    assert_allclose(got_corrs, corrs, atol=corr_tol)
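As a reference for non-Neuromag data, here is a sketch that mirrors the CTF branch of test_fine_cal_systems above; the file name comes from the MNE testing dataset used by the test (substitute your own recording), and the very loose angle_limit/err_limit values are the ones the test needs for this short file, not general recommendations:

    import mne
    from mne.preprocessing import (
        compute_fine_calibration,
        maxwell_filter,
        write_fine_calibration,
    )

    raw = mne.io.read_raw_ctf("testdata_ctf.ds").crop(0, 1)
    raw.apply_gradient_compensation(0)  # undo gradient compensation, as the test does
    raw.info["dev_head_t"] = None  # treat the recording as empty-room
    raw.load_data().resample(50, method="polyphase")  # avoid line noise, speed up

    fc, count = compute_fine_calibration(raw, angle_limit=170, err_limit=6000)
    write_fine_calibration("fc.dat", fc)

    raw_sss = maxwell_filter(
        raw,
        calibration=fc,
        origin=(0.0, 0.0, 0.0),
        coord_frame="meg",
        ignore_ref=True,
        regularize=None,
        bad_condition="ignore",
    )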
diff --git a/tools/install_pre_requirements.sh b/tools/install_pre_requirements.sh
index 42a44c61916..2577d47d34b 100755
--- a/tools/install_pre_requirements.sh
+++ b/tools/install_pre_requirements.sh
@@ -40,7 +40,9 @@ echo "OpenMEEG"
 python -m pip install $STD_ARGS --only-binary ":all:" --extra-index-url "https://test.pypi.org/simple" "openmeeg>=2.6.0.dev4"

 echo "nilearn"
-python -m pip install $STD_ARGS git+https://github.com/nilearn/nilearn
+# TODO: Revert once settled:
+# https://github.com/scikit-learn/scikit-learn/pull/30268#issuecomment-2479701651
+python -m pip install $STD_ARGS "git+https://github.com/larsoner/nilearn@sklearn"

 echo "VTK"
 # No pre until PyVista fixes a bug