Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

More tests #13

Merged
merged 19 commits
Jul 3, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
19 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 12 additions & 1 deletion .github/workflows/ci-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,9 @@ jobs:
run:
shell: bash -l {0}

env:
IRIS_TEST_DATA_VERSION: "2.20"

steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@v3
Expand Down Expand Up @@ -50,6 +53,14 @@ jobs:
conda info
conda list --explicit

- name: "Fetch Iris-test-data"
run: |
wget --quiet https://github.com/SciTools/iris-test-data/archive/v${IRIS_TEST_DATA_VERSION}.zip -O iris-test-data.zip
unzip -q iris-test-data.zip
mkdir --parents ${GITHUB_WORKSPACE}/iris_test_data
mv iris-test-data-${IRIS_TEST_DATA_VERSION} ${GITHUB_WORKSPACE}/iris_test_data_download

- name: "Run tests"
run: |
PYTHONPATH=./tests:$PYTHONPATH pytest -v ./tests
ls ${GITHUB_WORKSPACE}/iris_test_data_download/test_data
OVERRIDE_TEST_DATA_REPOSITORY=${GITHUB_WORKSPACE}/iris_test_data_download/test_data PYTHONPATH=./tests:$PYTHONPATH pytest -v ./tests
2 changes: 1 addition & 1 deletion lib/ncdata/_core.py
Original file line number Diff line number Diff line change
Expand Up @@ -295,7 +295,7 @@ def _print_value(self):

# Convert numpy non-string scalars to simple Python values, in string output.
if getattr(value, "shape", None) in ((0,), (1,), ()):
op = {"i": int, "f": float}[value.dtype.kind]
op = {"i": int, "u": int, "f": float}[value.dtype.kind]
value = op(value.flatten()[0])

return repr(value)
Expand Down
29 changes: 24 additions & 5 deletions lib/ncdata/dataset_like.py
Original file line number Diff line number Diff line change
Expand Up @@ -90,6 +90,9 @@ class Nc4DatasetLike(_Nc4DatalikeWithNcattrs):

_local_instance_props = ("_ncdata", "variables", "dimensions")

# Needed for Iris to recognise the dataset format.
file_format = "NETCDF4"

def __init__(self, ncdata: NcData = None):
if ncdata is None:
ncdata = NcData() # an empty dataset
Expand Down Expand Up @@ -134,13 +137,26 @@ def createVariable(
msg = f'creating duplicate variable "{varname}".'
raise ValueError(msg)
# Add a variable into the underlying NcData object.

# N.B. to correctly mirror netCDF4, a variable must be created with all-masked
# content. For this we need to decode the dims + work out the shape.
# NOTE: simplistic version here, as we don't support groups.
shape = tuple(
self._ncdata.dimensions[dim_name].size for dim_name in dimensions
)
# Note: does *not* allocate a full array in memory ...until you modify it.
initial_allmasked_data = np.ma.masked_array(
np.zeros(shape, dtype=datatype), mask=True
)

ncvar = NcVariable(
name=varname,
dimensions=dimensions,
data=initial_allmasked_data,
group=self._ncdata,
)
# Note: initially has no data (or attributes), since this is how netCDF4 expects
# to do it.
# Note: no valid data is initially assigned, since that is how the netCDF4 API
# does it.
self._ncdata.variables[varname] = ncvar
# Create a netCDF4-like "wrapper" variable + install that here.
nc4var = Nc4VariableLike._from_ncvariable(ncvar, dtype=datatype)
Expand All @@ -155,10 +171,9 @@ def close(self): # noqa: D102

@staticmethod
def filepath() -> str: # noqa: D102
#
# Note: for now, let's just not care about this.
# we *might* need this to be an optional defined item on an NcData ??
# .. or, we ight need to store an xarray "encoding" somewhere ?
# .. or, we might need to store an xarray "encoding" somewhere ?
# TODO: more thought here ?
# return self.ncdata.encoding.get("source", "")
return "<Nc4DatasetLike>"
Expand Down Expand Up @@ -225,7 +240,11 @@ def __getitem__(self, keys): # noqa: D105
raise IndexError(keys)
if self.ndim == 0:
return self._ncdata.data
return self._ncdata.data[keys]
array = self._ncdata.data[keys]
if hasattr(array, "compute"):
# When accessed as a data variable, we must realise lazy data.
array = array.compute()
return array

# The __setitem__ is not required for normal saving.
# The saver will assign ._data_array instead
Expand Down
Loading