Fixed lint
Signed-off-by: Elena Khaustova <[email protected]>
ElenaKhaustova committed Jan 7, 2025
1 parent 262c059 commit e65368d
Showing 1 changed file with 54 additions and 54 deletions.
kedro-datasets/tests/partitions/test_partitioned_dataset.py
@@ -65,17 +65,17 @@ class TestPartitionedDatasetLocal:
    def test_repr(self, dataset):
        pds = PartitionedDataset(path="", dataset=dataset)
        assert (
            repr(pds)
            == """kedro_datasets.partitions.partitioned_dataset.PartitionedDataset(filepath='', """
            """dataset='kedro_datasets.pandas.parquet_dataset.ParquetDataset()')"""
        )

    @pytest.mark.parametrize("dataset", LOCAL_DATASET_DEFINITION)
    @pytest.mark.parametrize(
        "suffix,expected_num_parts", [("", 5), (".csv", 3), ("p4", 1)]
    )
    def test_load(
        self, dataset, local_csvs, partitioned_data_pandas, suffix, expected_num_parts
    ):
        pds = PartitionedDataset(
            path=str(local_csvs), dataset=dataset, filename_suffix=suffix
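
A minimal usage sketch of the behaviour test_load exercises (not part of the diff; the folder path and variable names are illustrative): filename_suffix filters which files count as partitions, and load() returns partition ids mapped to lazy loading callables.

from kedro_datasets.partitions import PartitionedDataset

# Sketch only: assumes a local folder containing CSV partitions.
pds = PartitionedDataset(
    path="data/01_raw/partitions",  # hypothetical folder
    dataset="pandas.CSVDataset",
    filename_suffix=".csv",  # only files ending in .csv are treated as partitions
)
partitions = pds.load()  # {partition_id: load_callable}
for partition_id, load_func in partitions.items():
    df = load_func()  # each partition is materialised lazily, on demand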
@@ -245,7 +245,7 @@ def test_load_args(self, mocker):
        [({"cred": "common"}, {"cred": "common"}, {"cred": "common"}), (None, {}, {})],
    )
    def test_credentials(
        self, mocker, credentials, expected_pds_creds, expected_dataset_creds
    ):
        mocked_filesystem = mocker.patch("fsspec.filesystem")
        path = str(Path.cwd())
@@ -296,8 +296,8 @@ def test_invalid_dataset(self, dataset, local_csvs):
                df_loader()
            error_message = str(exc_info.value)
            assert (
                "Either the file is corrupted or this is not a parquet file"
                in error_message
            )
            assert str(partition) in error_message

@@ -306,13 +306,13 @@ def test_invalid_dataset(self, dataset, local_csvs):
        [
            ("UndefinedDatasetType", "Class 'UndefinedDatasetType' not found"),
            (
                "missing.module.UndefinedDatasetType",
                r"Class 'missing\.module\.UndefinedDatasetType' not found",
            ),
            (
                FakeDataset,
                r"Dataset type 'tests\.partitions\.test_partitioned_dataset\.FakeDataset' "
                r"is invalid\: all dataset types must extend 'AbstractDataset'",
            ),
            ({}, "'type' is missing from dataset catalog configuration"),
        ],
@@ -333,13 +333,13 @@ def test_invalid_dataset_config(self, dataset_config, error_pattern):
        "suffix,expected_num_parts", [("", 5), (".csv", 3), ("p4", 1)]
    )
    def test_versioned_dataset_save_and_load(
        self,
        mocker,
        filepath_csvs,
        dataset_config,
        suffix,
        expected_num_parts,
        partitioned_data_pandas,
    ):
        """Test that saved and reloaded data matches the original one for
        the versioned dataset."""
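
A rough sketch of the round trip the docstring above describes (hypothetical folder and data; assumes the underlying dataset type supports versioning, which is what this test exercises):

import pandas as pd
from kedro_datasets.partitions import PartitionedDataset

df_a = pd.DataFrame({"x": [1, 2]})  # illustrative partition data
df_b = pd.DataFrame({"x": [3, 4]})
pds = PartitionedDataset(
    path="data/partitions",  # hypothetical folder
    dataset={"type": "pandas.CSVDataset", "versioned": True},
)
pds.save({"part_a": df_a, "part_b": df_b})
reloaded = {pid: load() for pid, load in pds.load().items()}
# each reloaded frame should match the one originally saved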
@@ -402,19 +402,19 @@ def test_no_partitions(self, tmpdir):
        "pds_config,filepath_arg",
        [
            (
                {
                    "path": str(Path.cwd()),
                    "dataset": {"type": CSVDataset, "filepath": "fake_path"},
                },
                "filepath",
            ),
            (
                {
                    "path": str(Path.cwd()),
                    "dataset": {"type": CSVDataset, "other_arg": "fake_path"},
                    "filepath_arg": "other_arg",
                },
                "other_arg",
            ),
        ],
    )
@@ -460,38 +460,38 @@ def test_fs_args_log_warning(self, caplog):
        "pds_config,expected_ds_creds,global_creds",
        [
            (
                {"dataset": "pandas.CSVDataset", "credentials": {"secret": "global"}},
                {"secret": "global"},
                {"secret": "global"},
            ),
            (
                {
                    "dataset": {
                        "type": CSVDataset,
                        "credentials": {"secret": "expected"},
                    },
                },
                {"secret": "expected"},
                {},
            ),
            (
                {
                    "dataset": {"type": CSVDataset, "credentials": None},
                    "credentials": {"secret": "global"},
                },
                None,
                {"secret": "global"},
            ),
            (
                {
                    "dataset": {
                        "type": CSVDataset,
                        "credentials": {"secret": "expected"},
                    },
                    "credentials": {"secret": "global"},
                },
                {"secret": "expected"},
                {"secret": "global"},
            ),
        ],
    )
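
The four cases above encode the credential-resolution rules: dataset-level credentials take precedence over those passed to the PartitionedDataset, and an explicit credentials: None on the dataset suppresses inheritance of the global credentials. A short sketch (bucket name and secret values are illustrative):

from kedro_datasets.partitions import PartitionedDataset

# Sketch only: the dataset receives {"secret": "expected"} while the
# filesystem receives {"secret": "global"}, mirroring the fourth case.
pds = PartitionedDataset(
    path="s3://hypothetical-bucket/prefix",
    dataset={
        "type": "pandas.CSVDataset",
        "credentials": {"secret": "expected"},
    },
    credentials={"secret": "global"},
)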