Tediously update all enum usages to use Enum.KEY.value instead of Enum.KEY, which works on Python<3.11 but broke in 3.11 (see: python/cpython#100458)

gordonhart committed May 10, 2023
1 parent 6093dce commit 3e9baba
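
Context: Python 3.11 changed Enum.__str__ and Enum.__format__ so that f-string interpolation of an enum member with a mixed-in type (str, int) yields the member's name rather than its value (python/cpython#100458). A minimal sketch of the difference, with an illustrative Path enum mirroring kolena/_api/v1/batched_load.py (the URL string here is assumed, not the real constant):

from enum import Enum

class Path(str, Enum):
    UPLOAD_SIGNED_URL_STUB = "/api/v1/batched-load/upload-signed-url"  # illustrative value

# Python <= 3.10: str.__format__ handles the str-mixin member, so its value is interpolated:
#   f"{Path.UPLOAD_SIGNED_URL_STUB}/abc" -> "/api/v1/batched-load/upload-signed-url/abc"
# Python 3.11: Enum.__format__ delegates to Enum.__str__, so the member name is interpolated:
#   f"{Path.UPLOAD_SIGNED_URL_STUB}/abc" -> "Path.UPLOAD_SIGNED_URL_STUB/abc"
# Accessing .value explicitly behaves identically on every version:
assert f"{Path.UPLOAD_SIGNED_URL_STUB.value}/abc" == "/api/v1/batched-load/upload-signed-url/abc"

Python 3.11 also added enum.StrEnum, whose members format as their values, but spelling out .value keeps the code working across all supported versions.
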
Showing 25 changed files with 113 additions and 114 deletions.
kolena/_api/v1/batched_load.py — 4 changes: 2 additions & 2 deletions

@@ -25,11 +25,11 @@ class Path(str, Enum):
 
     @classmethod
     def upload_signed_url(cls, load_uuid: str) -> str:
-        return f"{cls.UPLOAD_SIGNED_URL_STUB}/{load_uuid}"
+        return f"{cls.UPLOAD_SIGNED_URL_STUB.value}/{load_uuid}"
 
     @classmethod
     def download_by_path(cls, path: str) -> str:
-        return f"{cls.DOWNLOAD_BY_PATH_STUB}/{path}"
+        return f"{cls.DOWNLOAD_BY_PATH_STUB.value}/{path}"
 
 @dataclass(frozen=True)
 class WithLoadUUID:

kolena/_api/v1/samples.py — 19 changes: 0 additions & 19 deletions

This file was deleted.

kolena/_utils/_consts.py — 1 change: 0 additions & 1 deletion

@@ -15,7 +15,6 @@
 
 
 class _BatchSize(int, Enum):
     UPLOAD_CHIPS = 5_000
     UPLOAD_RECORDS = 10_000_000
     UPLOAD_RESULTS = 1_000_000
 
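
For the int-mixin _BatchSize enum, numeric behavior is unaffected by the 3.11 change; only string rendering differs. A small sketch (constant copied from the hunk above):

from enum import Enum

class _BatchSize(int, Enum):
    UPLOAD_RECORDS = 10_000_000

# Comparisons still go through the int mixin, so checks like
# `n >= _BatchSize.UPLOAD_RECORDS` keep working on 3.11:
assert _BatchSize.UPLOAD_RECORDS == 10_000_000
# Rendering is what changed: f"{_BatchSize.UPLOAD_RECORDS}" is "10000000" on <= 3.10
# but "_BatchSize.UPLOAD_RECORDS" on 3.11, so members that end up in logs, URLs, or
# serialized payloads are normalized to .value throughout this commit.
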
kolena/_utils/batched_load.py — 8 changes: 4 additions & 4 deletions

@@ -45,7 +45,7 @@
 
 
 def init_upload() -> API.InitiateUploadResponse:
-    init_res = krequests.put(endpoint_path=API.Path.INIT_UPLOAD)
+    init_res = krequests.put(endpoint_path=API.Path.INIT_UPLOAD.value)
     krequests.raise_for_status(init_res)
     init_response = from_dict(data_class=API.InitiateUploadResponse, data=init_res.json())
     return init_response
@@ -81,7 +81,7 @@ def upload_data_frame_chunk(df_chunk: pd.DataFrame, load_uuid: str) -> None:
 def upload_image_chips(
     df: _ImageChipsDataFrame,
     path_mapper: AssetPathMapper,
-    batch_size: int = _BatchSize.UPLOAD_CHIPS,
+    batch_size: int = _BatchSize.UPLOAD_CHIPS.value,
 ) -> None:
     def upload_batch(df_batch: _ImageChipsDataFrame) -> None:
         df_batch = df_batch.reset_index(drop=True)  # reset indices so we match the signed_url indices
@@ -106,7 +106,7 @@ def as_buffer(image_raw: np.ndarray) -> io.BytesIO:
             ],
         )
         upload_response = krequests.put(
-            endpoint_path=AssetAPI.Path.BULK_UPLOAD,
+            endpoint_path=AssetAPI.Path.BULK_UPLOAD.value,
             data=data,
             headers={"Content-Type": data.content_type},
         )
@@ -157,7 +157,7 @@ def complete_load(uuid: Optional[str]) -> None:
         return
     complete_request = API.CompleteDownloadRequest(uuid=uuid)
     complete_res = krequests.put(
-        endpoint_path=API.Path.COMPLETE_DOWNLOAD,
+        endpoint_path=API.Path.COMPLETE_DOWNLOAD.value,
         data=json.dumps(dataclasses.asdict(complete_request)),
     )
     krequests.raise_for_status(complete_res)

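Passing a member such as API.Path.INIT_UPLOAD is harmless in itself, since the member is a str subclass; the break surfaces wherever the path is later interpolated into a URL. A hypothetical sketch of that failure mode (url_for and BASE_URL are illustrative, not kolena's actual internals):

from enum import Enum

class Path(str, Enum):
    INIT_UPLOAD = "/api/v1/batched-load/init-upload"  # illustrative value

BASE_URL = "https://api.example.com"  # illustrative value

def url_for(endpoint_path: str) -> str:
    # f-string interpolation is where the 3.11 Enum.__format__ change bites:
    return f"{BASE_URL}{endpoint_path}"

url_for(Path.INIT_UPLOAD)        # 3.10: ".../init-upload"; 3.11: ".../Path.INIT_UPLOAD"
url_for(Path.INIT_UPLOAD.value)  # ends with "/init-upload" on every version
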
kolena/_utils/instrumentation.py — 4 changes: 2 additions & 2 deletions

@@ -54,15 +54,15 @@ def upload_log(message: str, status: str) -> None:
         message=message,
         status=status,
     )
-    krequests.post(endpoint_path=API.Path.UPLOAD, json=dataclasses.asdict(request))
+    krequests.post(endpoint_path=API.Path.UPLOAD.value, json=dataclasses.asdict(request))
 
 
 def log_telemetry(e: BaseException) -> None:
     try:
         stack = tb.format_stack()
         exc_format = tb.format_exception(None, e, e.__traceback__)
         combined = stack + exc_format
-        upload_log("".join(combined), DatadogLogLevels.ERROR)
+        upload_log("".join(combined), DatadogLogLevels.ERROR.value)
     except BaseException:
         """
         Attempting to upload the telemetry is best-effort. We don't want to have exceptions in that

kolena/_utils/repository.py — 2 changes: 1 addition & 1 deletion

@@ -21,7 +21,7 @@
 
 def create(repository: str) -> None:
     response = krequests.post(
-        endpoint_path=Path.CREATE,
+        endpoint_path=Path.CREATE.value,
         data=json.dumps(dataclasses.asdict(CreateRepositoryRequest(repository=repository))),
     )
     krequests.raise_for_status(response)

kolena/detection/_internal/model.py — 12 changes: 6 additions & 6 deletions

@@ -93,7 +93,7 @@ def __init__(self, name: str, workflow: WorkflowType, metadata: Optional[Dict[st
     def _create(cls, workflow: WorkflowType, name: str, metadata: Dict[str, Any]) -> CoreAPI.EntityData:
         log.info(f"creating new model '{name}'")
         request = CoreAPI.CreateRequest(name=name, metadata=metadata, workflow=workflow.value)
-        res = krequests.post(endpoint_path=API.Path.CREATE, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.post(endpoint_path=API.Path.CREATE.value, data=json.dumps(dataclasses.asdict(request)))
         krequests.raise_for_status(res)
         log.success(f"created new model '{name}'")
         return from_dict(data_class=CoreAPI.EntityData, data=res.json())
@@ -102,7 +102,7 @@ def _create(cls, workflow: WorkflowType, name: str, metadata: Dict[str, Any]) ->
     @validate_arguments(config=ValidatorConfig)
     def _load_by_name(cls, name: str) -> CoreAPI.EntityData:
         request = CoreAPI.LoadByNameRequest(name=name)
-        res = krequests.put(endpoint_path=API.Path.LOAD_BY_NAME, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.put(endpoint_path=API.Path.LOAD_BY_NAME.value, data=json.dumps(dataclasses.asdict(request)))
         krequests.raise_for_status(res)
         return from_dict(data_class=CoreAPI.EntityData, data=res.json())
 
@@ -131,7 +131,7 @@ def iter_inferences(
     def _iter_inference_batch_for_reference(
         self,
         test_object: Union[_TestCaseClass, _TestSuiteClass],
-        batch_size: int = _BatchSize.LOAD_SAMPLES,
+        batch_size: int = _BatchSize.LOAD_SAMPLES.value,
     ) -> Iterator[_LoadInferencesDataFrameClass]:
         if batch_size <= 0:
             raise InputValidationError(f"invalid batch_size '{batch_size}': expected positive integer")
@@ -143,7 +143,7 @@ def _iter_inference_batch_for_reference(
         init_request = API.InitLoadInferencesRequest(**params)
         yield from _BatchedLoader.iter_data(
             init_request=init_request,
-            endpoint_path=API.Path.INIT_LOAD_INFERENCES,
+            endpoint_path=API.Path.INIT_LOAD_INFERENCES.value,
             df_class=self._LoadInferencesDataFrameClass,
         )
         log.success(f"loaded inferences from model '{self.name}' on {test_object_display_name}")
@@ -166,7 +166,7 @@ def load_inferences_by_test_case(
     def _iter_inference_batch_for_test_suite(
         self,
         test_suite: _TestSuiteClass,
-        batch_size: int = _BatchSize.LOAD_SAMPLES,
+        batch_size: int = _BatchSize.LOAD_SAMPLES.value,
     ) -> Iterator[_LoadInferencesDataFrameClass]:
         if batch_size <= 0:
             raise InputValidationError(f"invalid batch_size '{batch_size}': expected positive integer")
@@ -175,7 +175,7 @@ def _iter_inference_batch_for_test_suite(
         init_request = API.InitLoadInferencesByTestCaseRequest(**params)
         yield from _BatchedLoader.iter_data(
             init_request=init_request,
-            endpoint_path=API.Path.INIT_LOAD_INFERENCES_BY_TEST_CASE,
+            endpoint_path=API.Path.INIT_LOAD_INFERENCES_BY_TEST_CASE.value,
             df_class=self._LoadInferencesDataFrameClass,
         )
         log.success(f"loaded inferences from model '{self.name}' on test suite '{test_suite.name}'")

kolena/detection/_internal/test_case.py — 12 changes: 6 additions & 6 deletions

@@ -128,7 +128,7 @@ def _create(
         """Create a new test case with the provided name."""
         log.info(f"creating new test case '{name}'")
         request = CoreAPI.CreateRequest(name=name, description=description or "", workflow=workflow.value)
-        res = krequests.post(endpoint_path=API.Path.CREATE, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.post(endpoint_path=API.Path.CREATE.value, data=json.dumps(dataclasses.asdict(request)))
         krequests.raise_for_status(res)
         data = from_dict(data_class=CoreAPI.EntityData, data=res.json())
         obj = cls._create_from_data(data)
@@ -142,7 +142,7 @@ def _create(
     def _load_by_name(cls, name: str, version: Optional[int] = None) -> CoreAPI.EntityData:
         """Load an existing test case with the provided name."""
         request = CoreAPI.LoadByNameRequest(name=name, version=version)
-        res = krequests.put(endpoint_path=API.Path.LOAD_BY_NAME, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.put(endpoint_path=API.Path.LOAD_BY_NAME.value, data=json.dumps(dataclasses.asdict(request)))
         krequests.raise_for_status(res)
         return from_dict(data_class=CoreAPI.EntityData, data=res.json())
 
@@ -173,10 +173,10 @@ def load_images(self) -> List[_TestImageClass]:
     def iter_images(self) -> Iterator[_TestImageClass]:
         """Iterate through all images with their associated ground truths in this test case."""
         log.info(f"loading test images for test case '{self.name}'")
-        init_request = CoreAPI.InitLoadContentsRequest(batch_size=_BatchSize.LOAD_SAMPLES, test_case_id=self._id)
+        init_request = CoreAPI.InitLoadContentsRequest(batch_size=_BatchSize.LOAD_SAMPLES.value, test_case_id=self._id)
         for df in _BatchedLoader.iter_data(
             init_request=init_request,
-            endpoint_path=API.Path.INIT_LOAD_IMAGES,
+            endpoint_path=API.Path.INIT_LOAD_IMAGES.value,
             df_class=self._TestImageDataFrameClass,
         ):
             for record in df.itertuples():
@@ -312,7 +312,7 @@ def edit(self, reset: bool = False) -> Iterator[Editor]:
         init_response = init_upload()
         df = self._to_data_frame(list(editor._images.values()))
         df_serialized = df.as_serializable()
-        upload_data_frame(df=df_serialized, batch_size=_BatchSize.UPLOAD_RECORDS, load_uuid=init_response.uuid)
+        upload_data_frame(df=df_serialized, batch_size=_BatchSize.UPLOAD_RECORDS.value, load_uuid=init_response.uuid)
 
         request = CoreAPI.CompleteEditRequest(
             test_case_id=self._id,
@@ -322,7 +322,7 @@ def edit(self, reset: bool = False) -> Iterator[Editor]:
             uuid=init_response.uuid,
         )
         complete_res = krequests.put(
-            endpoint_path=API.Path.COMPLETE_EDIT,
+            endpoint_path=API.Path.COMPLETE_EDIT.value,
             data=json.dumps(dataclasses.asdict(request)),
         )
         krequests.raise_for_status(complete_res)

kolena/detection/_internal/test_run.py — 25 changes: 17 additions & 8 deletions

@@ -97,7 +97,10 @@ def __init__(
             test_suite_ids=[test_suite._id],
             config=config,
         )
-        res = krequests.post(endpoint_path=API.Path.CREATE_OR_RETRIEVE, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.post(
+            endpoint_path=API.Path.CREATE_OR_RETRIEVE.value,
+            data=json.dumps(dataclasses.asdict(request)),
+        )
         krequests.raise_for_status(res)
         response = from_dict(data_class=API.CreateOrRetrieveResponse, data=res.json())
         self._id = response.test_run_id
@@ -128,7 +131,7 @@ def __exit__(
         self._submit_custom_metrics()
         self._active = False
         if exc_type is not None:
-            report_crash(self._id, API.Path.MARK_CRASHED)
+            report_crash(self._id, API.Path.MARK_CRASHED.value)
 
     @validate_arguments(config=ValidatorConfig)
     def add_inferences(self, image: _TestImageClass, inferences: Optional[List[_InferenceClass]]) -> None:
@@ -160,7 +163,7 @@ def add_inferences(self, image: _TestImageClass, inferences: Optional[List[_Infe
 
         self._inferences[image_id] = context_image_inferences
 
-        if self._n_inferences >= _BatchSize.UPLOAD_RESULTS:
+        if self._n_inferences >= _BatchSize.UPLOAD_RESULTS.value:
             log.info(f"uploading batch of '{self._n_inferences}' inference results")
             self._upload_chunk()
             log.success(f"uploaded batch of '{self._n_inferences}' inference results")
@@ -176,7 +179,7 @@ def iter_images(self) -> Iterator[_TestImageClass]:
             yield self._image_from_load_image_record(record)
 
     @validate_arguments(config=ValidatorConfig)
-    def load_images(self, batch_size: int = _BatchSize.LOAD_SAMPLES) -> List[_TestImageClass]:
+    def load_images(self, batch_size: int = _BatchSize.LOAD_SAMPLES.value) -> List[_TestImageClass]:
         """
         Returns a list of images that still need inferences evaluated, bounded in count
         by batch_size. Note that image ground truths will be excluded from the returned
@@ -195,7 +198,7 @@ def load_images(self, batch_size: int = _BatchSize.LOAD_SAMPLES) -> List[_TestIm
         return [self._image_from_load_image_record(record) for record in df_image_batch.itertuples()]
 
     @validate_arguments(config=ValidatorConfig)
-    def _iter_image_batch(self, batch_size: int = _BatchSize.LOAD_SAMPLES) -> Iterator[_LoadTestImagesDataFrameClass]:
+    def _iter_image_batch(
+        self,
+        batch_size: int = _BatchSize.LOAD_SAMPLES.value,
+    ) -> Iterator[_LoadTestImagesDataFrameClass]:
         if batch_size <= 0:
             raise InputValidationError(f"invalid batch_size '{batch_size}': expected positive integer")
         init_request = API.InitLoadRemainingImagesRequest(
@@ -205,7 +211,7 @@
         )
         yield from _BatchedLoader.iter_data(
             init_request=init_request,
-            endpoint_path=API.Path.INIT_LOAD_REMAINING_IMAGES,
+            endpoint_path=API.Path.INIT_LOAD_REMAINING_IMAGES.value,
             df_class=self._LoadTestImagesDataFrameClass,
         )
 
@@ -239,7 +245,7 @@ def _finalize_upload(self) -> None:
         log.info("finalizing inference upload for test run")
         request = API.UploadImageResultsRequest(uuid=self._upload_uuid, test_run_id=self._id, reset=self._reset)
         finalize_res = krequests.put(
-            endpoint_path=API.Path.UPLOAD_IMAGE_RESULTS,
+            endpoint_path=API.Path.UPLOAD_IMAGE_RESULTS.value,
            data=json.dumps(dataclasses.asdict(request)),
         )
         krequests.raise_for_status(finalize_res)
@@ -289,6 +295,9 @@ def _submit_custom_metrics(self) -> None:
         log.info("submitting custom metrics for test run")
         custom_metrics = self._compute_custom_metrics()
         request = API.UpdateCustomMetricsRequest(model_id=self._model._id, metrics=custom_metrics)
-        res = krequests.put(endpoint_path=API.Path.UPLOAD_CUSTOM_METRICS, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.put(
+            endpoint_path=API.Path.UPLOAD_CUSTOM_METRICS.value,
+            data=json.dumps(dataclasses.asdict(request)),
+        )
         krequests.raise_for_status(res)
         log.success("submitted custom metrics for test run")

kolena/detection/_internal/test_suite.py — 6 changes: 3 additions & 3 deletions

@@ -112,7 +112,7 @@ def _create(
         """Create a new test suite with the provided name."""
         log.info(f"creating new test suite '{name}'")
         request = CoreAPI.TestSuite.CreateRequest(name=name, description=description or "", workflow=workflow.value)
-        res = krequests.post(endpoint_path=API.Path.CREATE, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.post(endpoint_path=API.Path.CREATE.value, data=json.dumps(dataclasses.asdict(request)))
         krequests.raise_for_status(res)
         data = from_dict(data_class=CoreAPI.TestSuite.EntityData, data=res.json())
         obj = cls._create_from_data(data)
@@ -127,7 +127,7 @@ def _load_by_name(cls, name: str, version: Optional[int] = None) -> CoreAPI.Test
         """Retrieve the existing test suite with the provided name."""
         request = CoreAPI.TestSuite.LoadByNameRequest(name=name, version=version)
         data = json.dumps(dataclasses.asdict(request))
-        res = krequests.put(endpoint_path=API.Path.LOAD_BY_NAME, data=data)
+        res = krequests.put(endpoint_path=API.Path.LOAD_BY_NAME.value, data=data)
         krequests.raise_for_status(res)
         return from_dict(data_class=CoreAPI.TestSuite.EntityData, data=res.json())
 
@@ -298,7 +298,7 @@ def edit(self, reset: bool = False) -> Iterator[Editor]:
             test_case_ids=list(editor._test_cases.values()),
         )
         data = json.dumps(dataclasses.asdict(request))
-        res = krequests.post(endpoint_path=API.Path.EDIT, data=data)
+        res = krequests.post(endpoint_path=API.Path.EDIT.value, data=data)
         krequests.raise_for_status(res)
         log.success(f"updated test suite '{self.name}'")
         test_suite_data = from_dict(data_class=CoreAPI.TestSuite.EntityData, data=res.json())

kolena/detection/test_image.py — 4 changes: 2 additions & 2 deletions

@@ -103,10 +103,10 @@ def iter_images(dataset: Optional[str] = None) -> Iterator[TestImage]:
     :param dataset: optionally specify the single dataset to be retrieved. By default, images from all
         datasets are returned
     """
-    init_request = API.InitLoadImagesRequest(dataset=dataset, batch_size=_BatchSize.LOAD_RECORDS)
+    init_request = API.InitLoadImagesRequest(dataset=dataset, batch_size=_BatchSize.LOAD_RECORDS.value)
     for df in _BatchedLoader.iter_data(
         init_request=init_request,
-        endpoint_path=API.Path.INIT_LOAD_IMAGES,
+        endpoint_path=API.Path.INIT_LOAD_IMAGES.value,
         df_class=TestImageDataFrame,
     ):
         for record in df.itertuples():

kolena/fr/model.py — 6 changes: 3 additions & 3 deletions

@@ -58,7 +58,7 @@ def create(cls, name: str, metadata: Dict[str, Any]) -> "Model":
         """
         log.info(f"creating model '{name}'")
         request = API.CreateRequest(name=name, metadata=metadata)
-        res = krequests.post(endpoint_path=API.Path.CREATE, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.post(endpoint_path=API.Path.CREATE.value, data=json.dumps(dataclasses.asdict(request)))
         krequests.raise_for_status(res)
         log.success(f"created model '{name}'")
         return Model.__factory__(from_dict(data_class=Model.Data, data=res.json()))
@@ -74,7 +74,7 @@ def load_by_name(cls, name: str) -> "Model":
         """
         log.info(f"loading model '{name}'")
         request = API.LoadByNameRequest(name=name)
-        res = krequests.put(endpoint_path=API.Path.LOAD_BY_NAME, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.put(endpoint_path=API.Path.LOAD_BY_NAME.value, data=json.dumps(dataclasses.asdict(request)))
         krequests.raise_for_status(res)
         log.success(f"loaded model '{name}'")
         return Model.__factory__(from_dict(data_class=Model.Data, data=res.json()))
@@ -146,7 +146,7 @@ def iter_pair_results(
         init_request = API.InitLoadPairResultsRequest(batch_size=batch_size, **base_load_request)
         yield from _BatchedLoader.iter_data(
             init_request=init_request,
-            endpoint_path=API.Path.INIT_LOAD_PAIR_RESULTS,
+            endpoint_path=API.Path.INIT_LOAD_PAIR_RESULTS.value,
             df_class=LoadedPairResultDataFrame,
         )
         log.success(f"loaded pair results from model '{self.data.name}' on {test_object_display_name}")