diff --git a/src/extremeweatherbench/case.py b/src/extremeweatherbench/case.py
index fcd29ed..5e33efd 100644
--- a/src/extremeweatherbench/case.py
+++ b/src/extremeweatherbench/case.py
@@ -1,4 +1,4 @@
-"""Utiltiies for defining individual units of case studies for analysis.
+"""Classes for defining individual units of case studies for analysis.
 
 Some code similarly structured to WeatherBench (Rasp et al.)."""
 import dataclasses
diff --git a/src/extremeweatherbench/config.py b/src/extremeweatherbench/config.py
index 2597a02..1e8f14e 100644
--- a/src/extremeweatherbench/config.py
+++ b/src/extremeweatherbench/config.py
@@ -42,9 +42,6 @@ class Config:
     cache: bool = False
 
 
-# TODO(daniel): Convert to just a dictionary mapping, since that's all this is. No need
-# for a dataclass here. Can also use a defaultdict that reflexively maps to keys added
-# to the mapping unless otherwise specified.
 @dataclasses.dataclass
 class ForecastSchemaConfig:
     """A mapping between standard variable names used across EWB, and their counterpart
diff --git a/src/extremeweatherbench/evaluate.py b/src/extremeweatherbench/evaluate.py
index e40ce78..124ce1d 100644
--- a/src/extremeweatherbench/evaluate.py
+++ b/src/extremeweatherbench/evaluate.py
@@ -181,6 +181,7 @@ def _open_forecast_dataset(
     eval_config: config.Config,
     forecast_schema_config: config.ForecastSchemaConfig = DEFAULT_FORECAST_SCHEMA_CONFIG,
 ):
+    """Open the forecast dataset specified for evaluation."""
     logging.info("Opening forecast dataset")
     if eval_config.forecast_dir.startswith("s3://"):
         fs = fsspec.filesystem("s3")
diff --git a/src/extremeweatherbench/events.py b/src/extremeweatherbench/events.py
index b0529af..09013b7 100644
--- a/src/extremeweatherbench/events.py
+++ b/src/extremeweatherbench/events.py
@@ -5,9 +5,6 @@
 from typing import List, Optional
 
 from extremeweatherbench import case
-# TODO(taylor): don't need link here
-CLIMATOLOGY_LINK = "/home/taylor/data/era5_2m_temperature_85th_by_hour_dayofyear.zarr"
-
 
 @dataclasses.dataclass
 class EventContainer:
diff --git a/src/extremeweatherbench/metrics.py b/src/extremeweatherbench/metrics.py
index f556710..260aabe 100644
--- a/src/extremeweatherbench/metrics.py
+++ b/src/extremeweatherbench/metrics.py
@@ -11,8 +11,6 @@
 logger = logging.getLogger(__name__)
 logger.setLevel(logging.INFO)
 
-T2M_85TH_PERCENTILE_CLIMATOLOGY_PATH = "gs://brightband-scratch/taylor/climatology/era5_2m_temperature_85th_rolling_by_hour_dayofyear.zarr"
-
 
 @dataclasses.dataclass
 class Metric:
diff --git a/src/extremeweatherbench/utils.py b/src/extremeweatherbench/utils.py
index 4ca5895..317cb5d 100644
--- a/src/extremeweatherbench/utils.py
+++ b/src/extremeweatherbench/utils.py
@@ -46,10 +46,7 @@ def convert_longitude_to_180(
 
 
 def generate_json_from_nc(u, so, fs, fs_out, json_dir):
-    """Generate a kerchunk JSON file from a NetCDF file.
-
-    TODO(taylor): Define function signature and docstring.
-    """
+    """Generate a kerchunk JSON file from a NetCDF file."""
     with fs.open(u, **so) as infile:
         h5chunks = SingleHdf5ToZarr(infile, u, inline_threshold=300)
 
@@ -65,9 +62,6 @@ def generate_json_from_nc(u, so, fs, fs_out, json_dir):
 
 
 def clip_dataset_to_bounding_box(
-    # NOTE(daniel): given its use here, "case.Location" should be moved to this module
-    # or something else stand-alone; high likelihood of inadvertently introducing a
-    # circular import dependency here.
     dataset: xr.Dataset,
     location_center: Location,
     length_km: float,
@@ -140,8 +134,6 @@ def remove_ocean_gridpoints(dataset: xr.Dataset) -> xr.Dataset:
     Returns:
         The dataset masked to only land gridpoints.
     """
-    # TODO(taylor): Extend this so that the user may pass their own land-sea mask,
-    # best suited the dataset they're analyzing.
     land = regionmask.defined_regions.natural_earth_v5_0_0.land_110
     land_sea_mask = land.mask(dataset.longitude, dataset.latitude)
     land_mask = land_sea_mask == 0
diff --git a/tests/test_evaluate.py b/tests/test_evaluate.py
index b6f6b7b..8260b56 100644
--- a/tests/test_evaluate.py
+++ b/tests/test_evaluate.py
@@ -37,7 +37,6 @@ def test_open_obs_datasets_no_forecast_paths():
         evaluate._open_forecast_dataset(invalid_config)
 
 
-# TODO: test for discordant datetimes not in line with forecast dataset
 def test_evaluate_base_case(mock_forecast_dataset, mock_gridded_obs_dataset):
     base_case = case.IndividualCase(
         id=1,