Turn ObjectStoreLocationProvider off by default (#1722)
Closes #1721

Otherwise, there's a default behavior change in the new 0.9.0 release: previous versions write to `data/`, while the new version writes to `data/<hash>/`.
kevinjqliu authored Feb 25, 2025
1 parent 71129ee commit 1d24e71
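
With the default off, writers who still want the hashed layout must opt in per table. A minimal sketch using the public catalog API, assuming a configured catalog and a `schema` defined elsewhere (catalog name and table identifier are placeholders):

from pyiceberg.catalog import load_catalog

catalog = load_catalog("default")  # placeholder catalog name
tbl = catalog.create_table(
    identifier="examples.events",  # placeholder identifier
    schema=schema,                 # assumes a pyiceberg Schema defined elsewhere
    properties={"write.object-storage.enabled": "true"},  # opt back in to data/<hash>/ paths
)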
Showing 3 changed files with 13 additions and 8 deletions.
2 changes: 1 addition & 1 deletion pyiceberg/table/__init__.py
@@ -202,7 +202,7 @@ class TableProperties:
     WRITE_PY_LOCATION_PROVIDER_IMPL = "write.py-location-provider.impl"

     OBJECT_STORE_ENABLED = "write.object-storage.enabled"
-    OBJECT_STORE_ENABLED_DEFAULT = True
+    OBJECT_STORE_ENABLED_DEFAULT = False

     WRITE_OBJECT_STORE_PARTITIONED_PATHS = "write.object-storage.partitioned-paths"
     WRITE_OBJECT_STORE_PARTITIONED_PATHS_DEFAULT = True
9 changes: 5 additions & 4 deletions tests/integration/test_writes/test_partitioned_writes.py
@@ -294,13 +294,14 @@ def test_object_storage_location_provider_excludes_partition_path(
         PartitionField(source_id=nested_field.field_id, field_id=1001, transform=IdentityTransform(), name=part_col)
     )

-    # write.object-storage.enabled and write.object-storage.partitioned-paths don't need to be specified as they're on by default
-    assert TableProperties.OBJECT_STORE_ENABLED_DEFAULT
-    assert TableProperties.WRITE_OBJECT_STORE_PARTITIONED_PATHS_DEFAULT
+    # Enable `write.object-storage.enabled` which is False by default
+    # `write.object-storage.partitioned-paths` is True by default
+    assert TableProperties.OBJECT_STORE_ENABLED_DEFAULT is False
+    assert TableProperties.WRITE_OBJECT_STORE_PARTITIONED_PATHS_DEFAULT is True
     tbl = _create_table(
         session_catalog=session_catalog,
         identifier=f"default.arrow_table_v{format_version}_with_null_partitioned_on_col_{part_col}",
-        properties={"format-version": str(format_version)},
+        properties={"format-version": str(format_version), TableProperties.OBJECT_STORE_ENABLED: True},
         data=[arrow_table_with_null],
         partition_spec=partition_spec,
     )
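
The same opt-in works for an existing table by updating its properties; a sketch assuming `Transaction.set_properties` accepts a properties mapping, as in recent PyIceberg releases:

# Re-enable the object-storage layout on an already-created table.
with tbl.transaction() as tx:
    tx.set_properties({"write.object-storage.enabled": "true"})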
10 changes: 7 additions & 3 deletions tests/table/test_locations.py
@@ -74,7 +74,7 @@ def test_custom_location_provider_not_found(caplog: Any) -> None:


 def test_object_storage_no_partition() -> None:
-    provider = load_location_provider(table_location="table_location", table_properties=EMPTY_DICT)
+    provider = load_location_provider(table_location="table_location", table_properties={"write.object-storage.enabled": "true"})

     location = provider.new_data_location("test.parquet")
     parts = location.split("/")
@@ -111,6 +111,7 @@ def test_object_storage_partitioned_paths_disabled(partition_key: Optional[Parti
     provider = load_location_provider(
         table_location="table_location",
         table_properties={
+            "write.object-storage.enabled": "true",
             "write.object-storage.partitioned-paths": "false",
         },
     )
@@ -131,15 +132,18 @@ def test_object_storage_partitioned_paths_disabled(partition_key: Optional[Parti
     ],
 )
 def test_hash_injection(data_file_name: str, expected_hash: str) -> None:
-    provider = load_location_provider(table_location="table_location", table_properties=EMPTY_DICT)
+    provider = load_location_provider(table_location="table_location", table_properties={"write.object-storage.enabled": "true"})

     assert provider.new_data_location(data_file_name) == f"table_location/data/{expected_hash}/{data_file_name}"


 def test_object_location_provider_write_data_path() -> None:
     provider = load_location_provider(
         table_location="s3://table-location/table",
-        table_properties={TableProperties.WRITE_DATA_PATH: "s3://table-location/custom/data/path"},
+        table_properties={
+            "write.object-storage.enabled": "true",
+            TableProperties.WRITE_DATA_PATH: "s3://table-location/custom/data/path",
+        },
     )

     assert (
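
For reference, the path difference these tests exercise can be reproduced directly with the provider loader; a sketch assuming the `pyiceberg.table.locations` import path used by this test module, with the hash segment shown as a placeholder:

from pyiceberg.table.locations import load_location_provider

# Default properties: plain data paths, matching pre-0.9.0 releases.
plain = load_location_provider(table_location="s3://bucket/tbl", table_properties={})
print(plain.new_data_location("test.parquet"))   # s3://bucket/tbl/data/test.parquet

# Property enabled: a hash component is injected under data/.
hashed = load_location_provider(
    table_location="s3://bucket/tbl",
    table_properties={"write.object-storage.enabled": "true"},
)
print(hashed.new_data_location("test.parquet"))  # s3://bucket/tbl/data/<hash>/test.parquet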
