diff --git a/.circleci/config.yml b/.circleci/config.yml index dc1bc81a..45c21932 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -32,7 +32,7 @@ jobs: jobname: type: string docker: - - image: cimg/python:3.12 + - image: cimg/python:3.13 environment: TOXENV=<< parameters.jobname >> steps: @@ -54,7 +54,7 @@ jobs: jobname: type: string docker: - - image: cimg/python:3.12 + - image: cimg/python:3.13 environment: TOXENV: << parameters.jobname >> GIT_SSH_COMMAND: ssh -i ~/.ssh/id_rsa_7b8fc81c13a3b446ec9aa50d3f626978 @@ -96,16 +96,16 @@ workflows: matrix: parameters: jobname: - - "py312-figure" - - "py312-figure-devdeps" + - "py313-figure" + - "py313-figure-devdeps" - deploy-reference-images: name: baseline-<< matrix.jobname >> matrix: parameters: jobname: - - "py312-figure" - - "py312-figure-devdeps" + - "py313-figure" + - "py313-figure-devdeps" requires: - << matrix.jobname >> filters: diff --git a/.cruft.json b/.cruft.json index 5361939d..7ee3549e 100644 --- a/.cruft.json +++ b/.cruft.json @@ -1,6 +1,6 @@ { "template": "https://github.com/sunpy/package-template", - "commit": "ff0522bc171a1fc63022ed2a371f70669173012e", + "commit": "37ffb52646450caa4de8ea084725dbff65fe0995", "checkout": null, "context": { "cookiecutter": { @@ -32,7 +32,7 @@ ".github/workflows/sub_package_update.yml" ], "_template": "https://github.com/sunpy/package-template", - "_commit": "ff0522bc171a1fc63022ed2a371f70669173012e" + "_commit": "37ffb52646450caa4de8ea084725dbff65fe0995" } }, "directory": null diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 4699ade9..6050e320 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -33,22 +33,22 @@ jobs: tests: uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@main with: - default_python: '3.10' + default_python: '3.13' coverage: 'codecov' posargs: '--color=yes' envs: | + - linux: py313 - linux: py312 - - linux: py311 - - windows: py311-online - - macos: py310 - - linux: py310-oldestdeps + - windows: py312-online + - macos: py311 + - linux: py311-oldestdeps secrets: CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} docs: uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@main with: - default_python: '3.10' + default_python: '3.12' coverage: 'codecov' envs: | - linux: build_docs-notebooks @@ -65,7 +65,7 @@ jobs: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: - python-version: '3.10' + python-version: '3.13' - run: python -m pip install -U --user build - run: python -m build . 
--sdist - run: python -m pip install -U --user twine @@ -77,7 +77,7 @@ jobs: coverage: 'codecov' posargs: '--color=yes' envs: | - - linux: py311-devdeps + - linux: py313-devdeps secrets: CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} @@ -90,7 +90,7 @@ jobs: ) uses: OpenAstronomy/github-actions-workflows/.github/workflows/publish_pure_python.yml@main with: - python-version: '3.10' + python-version: '3.13' test_extras: tests test_command: pytest --pyargs dkist -k "not test_fail" # We have to work around a github runner bug here: https://github.com/actions/runner/issues/2788#issuecomment-2145922705 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6e27429c..5b48829e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,7 +1,7 @@ repos: # This should be before any formatting hooks like isort - repo: https://github.com/astral-sh/ruff-pre-commit - rev: "v0.9.1" + rev: "v0.9.2" hooks: - id: ruff args: ["--fix"] @@ -26,7 +26,7 @@ repos: - id: mixed-line-ending exclude: ".*(.fits|.fts|.fit|.header|.txt|tca.*|.asdf|.json|.hdr)$" - repo: https://github.com/codespell-project/codespell - rev: v2.3.0 + rev: v2.4.0 hooks: - id: codespell args: [ "--write-changes", "-D", "-", "-D", ".codespell-dict.txt"] diff --git a/.rtd-environment.yml b/.rtd-environment.yml index 2dc187de..a2b68153 100644 --- a/.rtd-environment.yml +++ b/.rtd-environment.yml @@ -2,6 +2,6 @@ name: dkist channels: - conda-forge dependencies: - - python=3.12 + - python=3.13 - pip - graphviz!=2.42.*,!=2.43.* diff --git a/changelog/491.feature.rst b/changelog/491.feature.rst index fcdf1ca1..783bfa18 100644 --- a/changelog/491.feature.rst +++ b/changelog/491.feature.rst @@ -1 +1 @@ -Add a ``fig=`` keyword argument to `TiledDataset.plot` and make it default to the current figure. +Add a ``figure=`` keyword argument to `TiledDataset.plot` and make it default to the current figure. diff --git a/changelog/503.bugfix.rst b/changelog/503.bugfix.rst new file mode 100644 index 00000000..6008e571 --- /dev/null +++ b/changelog/503.bugfix.rst @@ -0,0 +1 @@ +Improve the ASDF detection code so out of date ASDF filenames generated by the DKIST data center are skipped if a newer filename is present. 
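Reviewer note: as a minimal sketch of the renamed keyword from changelog/491 above, usage is expected to look like the following; the dataset directory below is a placeholder and not part of this diff:

```python
# Hedged sketch of the ``figure=`` keyword: assumes a tiled (e.g. VBI) dataset
# has already been downloaded to a hypothetical local path.
import matplotlib.pyplot as plt

from dkist import load_dataset

tds = load_dataset("~/dkist_data/VBI_L1_20231016T184519_AJQWW")  # placeholder path
fig = plt.figure(figsize=(12, 15))
tds.plot(0, share_zscale=True, figure=fig)  # previously passed as fig=fig
plt.show()
```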
diff --git a/changelog/507.breaking.rst b/changelog/507.breaking.rst new file mode 100644 index 00000000..ccb1e7ca --- /dev/null +++ b/changelog/507.breaking.rst @@ -0,0 +1,9 @@ +The minimum supported versions of dependencies and Python have been updated; this release requires: +* Python 3.11 +* asdf 2.15 (and plugin version bumps) +* dask 2023.2 +* matplotlib 3.7 +* ndcube 2.1 +* numpy 1.25 +* parfive 2.1 +* sunpy 5.0 diff --git a/dkist/dataset/loader.py b/dkist/dataset/loader.py index 1b1f4ea0..4c1aaecb 100644 --- a/dkist/dataset/loader.py +++ b/dkist/dataset/loader.py @@ -1,11 +1,16 @@ +import re +import warnings import importlib.resources as importlib_resources from pathlib import Path from functools import singledispatch +from collections import defaultdict from parfive import Results import asdf +from dkist.utils.exceptions import DKISTUserWarning + try: # first try to import from asdf.exceptions for asdf 2.15+ from asdf.exceptions import ValidationError @@ -14,6 +19,9 @@ from asdf import ValidationError +ASDF_FILENAME_PATTERN = r"^(?P<instrument>[A-Z-]+)_L1_(?P<start_time>\d{8}T\d{6})_(?P<dataset_id>[A-Z]{5,})(?P<suffix>_user_tools|_metadata)?\.asdf$" + + def asdf_open_memory_mapping_kwarg(memmap: bool) -> dict: if asdf.__version__ > "3.1.0": return {"memmap": memmap} @@ -138,8 +146,30 @@ def _load_from_path(path: Path): def _load_from_directory(directory): """ - Construct a `~dkist.dataset.Dataset` from a directory containing one - asdf file and a collection of FITS files. + Construct a `~dkist.dataset.Dataset` from a directory containing one (or + more) ASDF files and a collection of FITS files. + + ASDF files have the generic pattern: + + ``{instrument}_L1_{start_time:%Y%m%dT%H%M%S}_{dataset_id}[_{suffix}].asdf`` + + where the ``_{suffix}`` on the end may be absent or one of a few different + suffixes which have been used at different times. When searching a + directory for one or more ASDF files to load, we should attempt to load + only one per dataset ID by selecting files in suffix order.
+ + The order of suffixes is (from most recently used to oldest): + + - ``_metadata`` + - ``_user_tools`` + - None + + The algorithm used to find ASDF files to load in a directory is therefore: + + - Glob the directory for all ASDF files + - Group all results by the filename prefix up to and including the dataset ID + - Ignore any ASDF files with an older suffix if a newer suffix is present + - Issue a warning to the user if any ASDF files with older suffixes are found """ base_path = Path(directory).expanduser() asdf_files = tuple(base_path.glob("*.asdf")) @@ -147,12 +177,60 @@ def _load_from_directory(directory): if not asdf_files: raise ValueError(f"No asdf file found in directory {base_path}.") - if len(asdf_files) > 1: - return _load_from_iterable(asdf_files) - - asdf_file = asdf_files[0] + if len(asdf_files) == 1: + return _load_from_asdf(asdf_files[0]) + + pattern = re.compile(ASDF_FILENAME_PATTERN) + candidates = [] + asdfs_to_load = [] + for filepath in asdf_files: + filename = filepath.name + + # If the asdf file doesn't match the data center pattern then we load it + # as it's probably a custom user file + if pattern.match(filename) is None: + asdfs_to_load.append(filepath) + continue + + # All the matches have to be checked + candidates.append(filepath) + + # If we only have one match load it + if len(candidates) == 1: + asdfs_to_load += candidates + else: + # Now we group by prefix + matches = [pattern.match(fp.name) for fp in candidates] + grouped = defaultdict(list) + for m in matches: + prefix = m.string.removesuffix(".asdf").removesuffix(m.group("suffix") or "") + grouped[prefix].append(m.group("suffix")) + + # Now we select the best suffix for each prefix + for prefix, suffixes in grouped.items(): + if "_metadata" in suffixes: + asdfs_to_load.append(base_path / f"{prefix}_metadata.asdf") + elif "_user_tools" in suffixes: + asdfs_to_load.append(base_path / f"{prefix}_user_tools.asdf") + elif None in suffixes: + asdfs_to_load.append(base_path / f"{prefix}.asdf") + else: + # This branch should never be hit because the regex enumerates the suffixes + raise ValueError("Unknown suffix encountered.") # pragma: no cover + + # Throw a warning if we have skipped any files + if ignored_files := set(asdf_files).difference(asdfs_to_load): + warnings.warn( + f"ASDF files with old names ({', '.join([a.name for a in ignored_files])}) " + "were found in this directory and ignored. 
You may want to delete these files.", + DKISTUserWarning + ) + + if len(asdfs_to_load) == 1: + return _load_from_asdf(asdfs_to_load[0]) + + return _load_from_iterable(asdfs_to_load) - return _load_from_asdf(asdf_file) def _load_from_asdf(filepath): diff --git a/dkist/dataset/tests/test_load_dataset.py b/dkist/dataset/tests/test_load_dataset.py index bc24a147..b98ab009 100644 --- a/dkist/dataset/tests/test_load_dataset.py +++ b/dkist/dataset/tests/test_load_dataset.py @@ -1,4 +1,6 @@ +import re import shutil +import numbers import pytest from parfive import Results @@ -7,6 +9,8 @@ from dkist import Dataset, TiledDataset, load_dataset from dkist.data.test import rootdir +from dkist.dataset.loader import ASDF_FILENAME_PATTERN +from dkist.utils.exceptions import DKISTUserWarning @pytest.fixture @@ -114,3 +118,86 @@ def test_not_dkist_asdf(tmp_path): with pytest.raises(TypeError, match="not a valid DKIST"): load_dataset(tmp_path / "test.asdf") + + +def generate_asdf_folder(tmp_path, asdf_path, filenames): + for fname in filenames: + shutil.copy(asdf_path, tmp_path / fname) + + return tmp_path + + +@pytest.mark.parametrize(("filename", "match"), [ + ("VBI_L1_20231016T184519_AJQWW.asdf", True), + ("VBI_L1_20231016T184519_AAAA.asdf", False), + ("VBI_L1_20231016T184519_AJQWW_user_tools.asdf", True), + ("VBI_L1_20231016T184519_AJQWW_metadata.asdf", True), + ("DL-NIRSP_L1_20231016T184519_AJQWW.asdf", True), + ("DL-NIRSP_L1_20231016T184519_AJQWW_user_tools.asdf", True), + ("DL-NIRSP_L1_20231016T184519_AJQWW_metadata.asdf", True), + ("VISP_L1_99999999T184519_AAAAAAA.asdf", True), + ("VISP_L1_20231016T888888_AAAAAAA_user_tools.asdf", True), + ("VISP_L1_20231016T184519_AAAAAAA_metadata.asdf", True), + ("VISP_L1_20231016T184519_AAAAAAA_unknown.asdf", False), + ("VISP_L1_20231016T184519.asdf", False), + ("wibble.asdf", False), + ]) +def test_asdf_regex(filename, match): + m = re.match(ASDF_FILENAME_PATTERN, filename) + assert bool(m) is match + + +@pytest.mark.parametrize(("filenames", "indices"), [ + pytest.param(("VBI_L1_20231016T184519_AJQWW.asdf",), 0, id="Single no suffix"), + pytest.param(("VBI_L1_20231016T184519_AJQWW_user_tools.asdf",), 0, id="single _user_tools"), + pytest.param(("VBI_L1_20231016T184519_AJQWW_metadata.asdf",), 0, id="single _metadata"), + pytest.param(("VBI_L1_20231016T184519_AJQWW_unknown.asdf",), 0, id="single _unknown"), + pytest.param(("VBI_L1_20231016T184519_AJQWW.asdf", + "VBI_L1_20231016T184519_AJQWW_user_tools.asdf",), 1, id="none & _user_tools"), + pytest.param(("VBI_L1_20231016T184519_AJQWW.asdf", + "VBI_L1_20231016T184519_AJQWW_user_tools.asdf", + "VBI_L1_20231016T184519_AJQWW_metadata.asdf",), 2, id="_user_tools & _metadata"), + pytest.param(("VBI_L1_20231016T184519_AJQWW.asdf", + "VBI_L1_20231016T184519_AJQWW_user_tools.asdf", + "VBI_L1_20231016T184519_AJQWW_metadata.asdf", + "VBI_L1_20231016T184519_AJQWW_unknown.asdf"), (2, 3), id="_user_tools & _metadata & _unknown"), + pytest.param(("random.asdf", + "VBI_L1_20231016T184519_AJQWW_user_tools.asdf",), (0, 1), id="other pattern & _user_tools"), + pytest.param(("random.asdf", + "VBI_L1_not_a_proper_name.asdf", + "VBI_L1_20231016T184519_AJQWW_user_tools.asdf", + "VBI_L1_20231016T184519_AJQWW_metadata.asdf",), (0, 1, 3), id="2 other patterns & _user_tools & _metadata"), + pytest.param(("VBI_L1_20231016T184519_AJQWW.asdf", + "VISP_L1_20231016T184519_AJQWW.asdf",), (0, 1), id="Two patterns, no suffix"), + pytest.param(("VBI_L1_20231016T184519_AAAAA.asdf", + "VBI_L1_20231016T184519_AAAAA_metadata.asdf", + 
"VBI_L1_20231116T184519_BBBBBBB.asdf", + "VBI_L1_20231216T184519_CCCCCCC.asdf", + "VBI_L1_20231216T184519_CCCCCCC_user_tools.asdf"), (1, 2, 4), id="Three patterns, mixed suffixes"), +]) +def test_select_asdf(tmp_path, asdf_path, filenames, indices, mocker): + asdf_folder = generate_asdf_folder(tmp_path, asdf_path, filenames) + + asdf_file_paths = tuple(asdf_folder / fname for fname in filenames) + + load_from_asdf = mocker.patch("dkist.dataset.loader._load_from_asdf") + load_from_iterable = mocker.patch("dkist.dataset.loader._load_from_iterable") + + # The load_dataset call should throw a warning if any files are skipped, but + # not otherwise, the warning should have the filenames of any skipped files in + tuple_of_indices = indices if isinstance(indices, tuple) else (indices,) + if len(tuple_of_indices) == len(filenames): + datasets = load_dataset(asdf_folder) + else: + files_to_be_skipped = set(filenames).difference([filenames[i] for i in tuple_of_indices]) + with pytest.warns(DKISTUserWarning, match=f".*[{'|'.join([re.escape(f) for f in files_to_be_skipped])}].*"): + datasets = load_dataset(asdf_folder) + + if isinstance(indices, numbers.Integral): + load_from_asdf.assert_called_once_with(asdf_file_paths[indices]) + else: + calls = load_from_iterable.mock_calls + # We need to assert that _load_from_iterable is called with the right + # paths but in a order-invariant way. + assert len(calls) == 1 + assert set(calls[0].args[0]) == {asdf_file_paths[i] for i in indices} diff --git a/dkist/dataset/tests/test_tiled_dataset.py b/dkist/dataset/tests/test_tiled_dataset.py index 11a9271a..2de6af3d 100644 --- a/dkist/dataset/tests/test_tiled_dataset.py +++ b/dkist/dataset/tests/test_tiled_dataset.py @@ -89,7 +89,7 @@ def test_tileddataset_plot(share_zscale): ds = TiledDataset(np.array(newtiles).reshape(ori_ds.shape), inventory=newtiles[0].inventory) fig = plt.figure(figsize=(12, 15)) - ds.plot(0, share_zscale=share_zscale, fig=fig) + ds.plot(0, share_zscale=share_zscale, figure=fig) return plt.gcf() @@ -118,7 +118,7 @@ def test_tileddataset_plot_limit_swapping(swap_tile_limits): assert non_square_ds.shape[0] != non_square_ds.shape[1] # Just in case the underlying data change for some reason fig = plt.figure(figsize=(12, 15)) - non_square_ds.plot(0, share_zscale=False, swap_tile_limits=swap_tile_limits, fig=fig) + non_square_ds.plot(0, share_zscale=False, swap_tile_limits=swap_tile_limits, figure=fig) assert fig.axes[0].get_gridspec().get_geometry() == non_square_ds.shape[::-1] for ax in fig.axes: diff --git a/dkist/dataset/tiled_dataset.py b/dkist/dataset/tiled_dataset.py index 45019c6a..bb5653f5 100644 --- a/dkist/dataset/tiled_dataset.py +++ b/dkist/dataset/tiled_dataset.py @@ -169,8 +169,7 @@ def _get_axislabels(ax): ylabel = coord.get_axislabel() or coord._get_default_axislabel() return (xlabel, ylabel) - - def plot(self, slice_index, share_zscale=False, fig=None, swap_tile_limits: Literal["x", "y", "xy"] | None = None, **kwargs): + def plot(self, slice_index, share_zscale=False, figure=None, swap_tile_limits: Literal["x", "y", "xy"] | None = None, **kwargs): """ Plot a slice of each tile in the TiledDataset @@ -184,7 +183,7 @@ def plot(self, slice_index, share_zscale=False, fig=None, swap_tile_limits: Lite Determines whether the color scale of the plots should be calculated independently (``False``) or shared across all plots (``True``). Defaults to False - fig : `matplotlib.figure.Figure` + figure : `matplotlib.figure.Figure` A figure to use for the plot. 
If not specified the current pyplot figure will be used, or a new one created. swap_tile_limits : `"x", "y", "xy"` or `None` (default) @@ -200,19 +199,19 @@ def plot(self, slice_index, share_zscale=False, fig=None, swap_tile_limits: Lite slice_index = (slice_index,) vmin, vmax = np.inf, 0 - if fig is None: - fig = plt.gcf() + if figure is None: + figure = plt.gcf() sliced_dataset = self.slice_tiles[slice_index] dataset_ncols, dataset_nrows = sliced_dataset.shape - gridspec = GridSpec(nrows=dataset_nrows, ncols=dataset_ncols, figure=fig) + gridspec = GridSpec(nrows=dataset_nrows, ncols=dataset_ncols, figure=figure) for col in range(dataset_ncols): for row in range(dataset_nrows): tile = sliced_dataset[col, row] # Fill up grid from the bottom row ax_gridspec = gridspec[dataset_nrows - row - 1, col] - ax = fig.add_subplot(ax_gridspec, projection=tile.wcs) + ax = figure.add_subplot(ax_gridspec, projection=tile.wcs) tile.plot(axes=ax, **kwargs) @@ -226,15 +225,15 @@ def plot(self, slice_index, share_zscale=False, fig=None, swap_tile_limits: Lite ax.set_xlabel(" ") if col == row == 0: xlabel, ylabel = self._get_axislabels(ax) - fig.supxlabel(xlabel, y=0.05) - fig.supylabel(ylabel, x=0.05) + figure.supxlabel(xlabel, y=0.05) + figure.supylabel(ylabel, x=0.05) axmin, axmax = ax.get_images()[0].get_clim() vmin = axmin if axmin < vmin else vmin vmax = axmax if axmax > vmax else vmax if share_zscale: - for ax in fig.get_axes(): + for ax in figure.get_axes(): ax.get_images()[0].set_clim(vmin, vmax) title = f"{self.inventory['instrumentName']} Dataset ({self.inventory['datasetId']}) at " @@ -245,8 +244,8 @@ def plot(self, slice_index, share_zscale=False, fig=None, swap_tile_limits: Lite val = val.symbol title += f"{coord} {val}" + (", " if i != len(slice_index)-1 else " ") title += f"(slice={(slice_index if len(slice_index) > 1 else slice_index[0])})".replace("slice(None, None, None)", ":") - fig.suptitle(title, y=0.95) - return fig + figure.suptitle(title, y=0.95) + return figure @property def slice_tiles(self): diff --git a/dkist/io/loaders.py b/dkist/io/loaders.py index 7d321328..ac702de8 100644 --- a/dkist/io/loaders.py +++ b/dkist/io/loaders.py @@ -19,19 +19,19 @@ common_parameters = """ - Parameters - ---------- - fileuri: `str` - The filename, either absolute, or if `basepath` is specified, relative to `basepath`. - shape: `tuple` - The shape of the array to be proxied. - dtype: `numpy.dtype` - The dtype of the resulting array - target: `int` - The HDU number to load the array from. - array_container: `BaseStripedExternalArray` - The parent object of this class, which builds the array from a sequence - of these loaders. +Parameters +---------- +fileuri: `str` + The filename, either absolute, or if `basepath` is specified, relative to `basepath`. +shape: `tuple` + The shape of the array to be proxied. +dtype: `numpy.dtype` + The dtype of the resulting array +target: `int` + The HDU number to load the array from. +array_container: `BaseStripedExternalArray` + The parent object of this class, which builds the array from a sequence + of these loaders. 
""" diff --git a/dkist/net/tests/strategies.py b/dkist/net/tests/strategies.py index c3837381..81801aff 100644 --- a/dkist/net/tests/strategies.py +++ b/dkist/net/tests/strategies.py @@ -72,7 +72,7 @@ def aunit(draw, number=st.floats(allow_nan=False, allow_infinity=False, min_valu @st.composite def _embargo_end(draw, time=Times( - max_value=datetime.datetime(datetime.datetime.utcnow().year, 1, 1, 0, 0), + max_value=datetime.datetime(datetime.datetime.now(datetime.UTC).year, 1, 1, 0, 0), min_value=datetime.datetime(1981, 1, 1, 0, 0)), delta=TimeDelta()): t1 = draw(time) diff --git a/pyproject.toml b/pyproject.toml index 79a421da..ce2577ca 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,37 +9,37 @@ build-backend = "setuptools.build_meta" [project] name = "dkist" description = "DKIST User Tools" -requires-python = ">=3.10" +requires-python = ">=3.11" readme = { file = "README.rst", content-type = "text/x-rst" } license = { file = "licenses/LICENSE.rst" } authors = [ { name = "NSO / AURA", email = "stuart@cadair.com" }, ] dependencies = [ - "aiohttp>=3.8", + "aiohttp>=3.9", # Provide minimum deps for all asdf packages used to generate or read asdf # files so that we test with these minimums and also generate asdf's with # them # Some of these schema dependencies are minimums because we generated asdf # files with them unpinned so they are now required to read generated asdf # files. - "asdf>=2.11.2", - "asdf-astropy>=0.2.0", - "asdf-coordinates-schemas>=0.1.0", - "asdf-standard>=1.0.3", - "asdf-transform-schemas>=0.3.0", + "asdf>=2.15.0", + "asdf-astropy>=0.4.0", + "asdf-coordinates-schemas>=0.2.0", + "asdf-standard>=1.1.0", + "asdf-transform-schemas>=0.4.0", "asdf-wcs-schemas>=0.3.0", "astropy>=5.3", - "dask[array]>=2021.8.0", + "dask[array]>=2023.2.0", "globus-sdk>=3.0", "gwcs>=0.19.0", - "matplotlib>=3.5", - "ndcube[plotting,reproject]>=2.0", - "numpy>=1.22", - "parfive[ftp]>=1.5", + "matplotlib>=3.7", + "ndcube[plotting,reproject]>=2.1", + "numpy>=1.25", + "parfive[ftp]>=2.1", "platformdirs>=3.0", - "sunpy[net,asdf]>=4.0.7", - "tqdm>=4.63", + "sunpy[net,asdf]>=5.0", + "tqdm>=4.65", ] dynamic = ["version"] @@ -69,7 +69,7 @@ docs = [ "sphinx-gallery", "pytest", "sphinx_autodoc_typehints", - "dkist-sphinx-theme>=1.1.2", + "dkist-sphinx-theme>=2.0", "sphinx-design", "myst-nb", "ipywidgets", diff --git a/pytest.ini b/pytest.ini index 3f278400..59426483 100644 --- a/pytest.ini +++ b/pytest.ini @@ -52,8 +52,7 @@ filterwarnings = ignore:ERFA function "taiutc"* ignore:ERFA function "utcut1"* ignore:Tried to get polar motions for times after IERS data is valid* - # Zeep deprecation warning - ignore:defusedxml.lxml is no longer supported and will be removed in a future release. + ignore:leap-second auto-update failed due to the following exception.* # This is due to dependencies building with a numpy version different from # the local installed numpy version, but should be fine # See https://github.com/numpy/numpy/issues/15748#issuecomment-598584838 @@ -65,28 +64,8 @@ filterwarnings = ignore:the imp module is deprecated in favour of importlib:DeprecationWarning:ipykernel.iostream # Ignore warnings about asdf versions ignore:File.*asdf.extension.BuiltinExtension.* - # pytest / asdf interaction - ignore:The .* argument to AsdfSchemaFile is deprecated - # gwcs main has the wrong version number: https://github.com/spacetelescope/gwcs/issues/399 - ignore:File.*from package gwcs\=\=0\.18\.0\), but older package \(gwcs.*\) is installed. 
# Ignore asdf older version errors ignore: File.* was created with extension.*but older package.*is installed. - ignore:The distutils.sysconfig module is deprecated, use sysconfig instead:DeprecationWarning - ignore:FLIP_TOP_BOTTOM is deprecated and will be removed in Pillow.* - ignore::ResourceWarning - # Zeep relies on deprecated cgi in Python 3.11 - ignore:'cgi' is deprecated and slated for removal in Python 3.13:DeprecationWarning:zeep.utils # Oldestdeps below here - ignore:`np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself.::dask.array - ignore:leap-second auto-update failed due to the following exception - ignore:"@coroutine" decorator is deprecated since Python 3.8, use "async def" instead - ignore:The loop argument is deprecated since Python 3.8, and scheduled for removal in Python 3.10. - ignore:Subclassing validator classes is not intended to be part of their public API - # Ignore warning so gwcs 0.18.3 can load - ignore:Converter handles multiple tags for this extension - # https://github.com/pandas-dev/pandas/issues/54466 - ignore:\nPyarrow will become a required dependency of pandas in the next major release of pandas:DeprecationWarning - # This seems to be coming out of pandas - ignore:datetime.datetime.utcfromtimestamp.*:DeprecationWarning - # The new way of doing it is 3.11 - ignore:datetime.datetime.utcnow.*:DeprecationWarning + ignore:pkg_resources is deprecated as an API.*:DeprecationWarning + ignore:Deprecated call to .*pkg_resources\.declare_namespace.*mpl_toolkits.*:DeprecationWarning diff --git a/tox.ini b/tox.ini index a9b041a3..a7ec5353 100644 --- a/tox.ini +++ b/tox.ini @@ -5,10 +5,10 @@ requires = pip >= 21.0.1 tox-pypi-filter >= 0.14 envlist = - py{310,311,312} - py312-{devdeps,benchmarks,figure} - py312-figure-devdeps - py310-oldestdeps + py{311,312,313} + py313-{devdeps,benchmarks,figure} + py313-figure-devdeps + py311-oldestdeps codestyle build_docs{,-notebooks} @@ -108,8 +108,8 @@ extras = docs commands = pip freeze --all --no-input - # Disable parallel here due to https://github.com/astropy/astropy/issues/14916 sphinx-build \ + # Disable parallel here due to https://github.com/astropy/astropy/issues/14916 -j 1 \ --color \ -W \
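Reviewer note: to summarise the new ASDF selection behaviour added in dkist/dataset/loader.py above, here is a small standalone sketch of the suffix-priority rule described in the ``_load_from_directory`` docstring; the helper name and example filenames are invented for illustration and are not part of this diff.

```python
# Standalone sketch of the suffix-priority selection described in the loader diff.
# The pattern and suffix order mirror the change; the helper and filenames are
# illustrative only.
import re
from collections import defaultdict

ASDF_FILENAME_PATTERN = (
    r"^(?P<instrument>[A-Z-]+)_L1_(?P<start_time>\d{8}T\d{6})"
    r"_(?P<dataset_id>[A-Z]{5,})(?P<suffix>_user_tools|_metadata)?\.asdf$"
)
SUFFIX_PRIORITY = ("_metadata", "_user_tools", "")  # newest naming convention first


def select_asdf_filenames(filenames):
    """Return the filenames that would be loaded, at most one per dataset ID."""
    pattern = re.compile(ASDF_FILENAME_PATTERN)
    selected, grouped = [], defaultdict(list)
    for name in filenames:
        m = pattern.match(name)
        if m is None:
            # Files not named by the data center are always loaded.
            selected.append(name)
            continue
        prefix = name.removesuffix(".asdf").removesuffix(m.group("suffix") or "")
        grouped[prefix].append(m.group("suffix") or "")
    for prefix, suffixes in grouped.items():
        # Pick the newest suffix present for each dataset ID prefix.
        best = next(s for s in SUFFIX_PRIORITY if s in suffixes)
        selected.append(f"{prefix}{best}.asdf")
    return selected


print(select_asdf_filenames([
    "VBI_L1_20231016T184519_AJQWW.asdf",
    "VBI_L1_20231016T184519_AJQWW_metadata.asdf",
    "my_custom_subset.asdf",
]))
# -> ['my_custom_subset.asdf', 'VBI_L1_20231016T184519_AJQWW_metadata.asdf']
```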