From 3e9baba0d29cd8fa8741903234aab1ba13dedb91 Mon Sep 17 00:00:00 2001
From: Gordon Hart
Date: Tue, 9 May 2023 20:34:29 -0400
Subject: [PATCH] Tediously update all enum usages to use Enum.KEY.value
 instead of Enum.KEY

Enum.KEY works on Python<3.11 but broke in 3.11 (see:
https://github.com/python/cpython/issues/100458).
---
 kolena/_api/v1/batched_load.py             |  4 +--
 kolena/_api/v1/samples.py                  | 19 -------------
 kolena/_utils/_consts.py                   |  1 -
 kolena/_utils/batched_load.py              |  8 +++---
 kolena/_utils/instrumentation.py           |  4 +--
 kolena/_utils/repository.py                |  2 +-
 kolena/detection/_internal/model.py        | 12 ++++----
 kolena/detection/_internal/test_case.py    | 12 ++++----
 kolena/detection/_internal/test_run.py     | 25 ++++++++++------
 kolena/detection/_internal/test_suite.py   |  6 ++--
 kolena/detection/test_image.py             |  4 +--
 kolena/fr/model.py                         |  6 ++--
 kolena/fr/test_case.py                     | 10 +++----
 kolena/fr/test_images.py                   |  6 ++--
 kolena/fr/test_run.py                      | 21 ++++++++------
 kolena/fr/test_suite.py                    |  6 ++--
 kolena/workflow/evaluator_function.py      |  2 +-
 kolena/workflow/model.py                   |  8 +++---
 kolena/workflow/test_case.py               | 12 ++++----
 kolena/workflow/test_run.py                | 33 ++++++++++++++--------
 kolena/workflow/test_suite.py              | 10 +++----
 kolena/workflow/workflow.py                |  8 +++---
 tests/integration/fr/test_test_run.py      |  2 +-
 tests/integration/generic/test_test_run.py |  2 +-
 tests/unit/utils/test_endpoints.py         |  4 +--
 25 files changed, 113 insertions(+), 114 deletions(-)
 delete mode 100644 kolena/_api/v1/samples.py
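For context, a minimal sketch of the behavior change behind this patch; the Path enum and its
value below are illustrative stand-ins, not the actual kolena API definitions:

    from enum import Enum

    class Path(str, Enum):
        CREATE = "/test-case/create"  # hypothetical endpoint value, for illustration only

    # Python <= 3.10: the str mixin drives __format__, so the member's value is rendered.
    # Python >= 3.11: Enum.__format__ defers to __str__, so the member name is rendered instead.
    print(f"{Path.CREATE}")        # "/test-case/create" on 3.10, "Path.CREATE" on 3.11
    print(f"{Path.CREATE.value}")  # "/test-case/create" on every version

Passing MEMBER.value explicitly, as done throughout this diff, keeps the rendered endpoint paths
identical on both sides of the 3.11 boundary.
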
diff --git a/kolena/_api/v1/batched_load.py b/kolena/_api/v1/batched_load.py
index e3bc94414..6a462bb66 100644
--- a/kolena/_api/v1/batched_load.py
+++ b/kolena/_api/v1/batched_load.py
@@ -25,11 +25,11 @@ class Path(str, Enum):
     @classmethod
     def upload_signed_url(cls, load_uuid: str) -> str:
-        return f"{cls.UPLOAD_SIGNED_URL_STUB}/{load_uuid}"
+        return f"{cls.UPLOAD_SIGNED_URL_STUB.value}/{load_uuid}"

     @classmethod
     def download_by_path(cls, path: str) -> str:
-        return f"{cls.DOWNLOAD_BY_PATH_STUB}/{path}"
+        return f"{cls.DOWNLOAD_BY_PATH_STUB.value}/{path}"


 @dataclass(frozen=True)
 class WithLoadUUID:
diff --git a/kolena/_api/v1/samples.py b/kolena/_api/v1/samples.py
deleted file mode 100644
index c3297df3c..000000000
--- a/kolena/_api/v1/samples.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright 2021-2023 Kolena Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from enum import Enum
-
-
-class SampleType(str, Enum):
-    LOCATOR = "LOCATOR"
-    LOCATOR_TEXT = "LOCATOR_TEXT"
diff --git a/kolena/_utils/_consts.py b/kolena/_utils/_consts.py
index fbed44f20..dac4a373d 100644
--- a/kolena/_utils/_consts.py
+++ b/kolena/_utils/_consts.py
@@ -15,7 +15,6 @@ class _BatchSize(int, Enum):
-    UPLOAD_CHIPS = 5_000
     UPLOAD_RECORDS = 10_000_000
     UPLOAD_RESULTS = 1_000_000
diff --git a/kolena/_utils/batched_load.py b/kolena/_utils/batched_load.py
index 85e10d239..709098e6b 100644
--- a/kolena/_utils/batched_load.py
+++ b/kolena/_utils/batched_load.py
@@ -45,7 +45,7 @@
 def init_upload() -> API.InitiateUploadResponse:
-    init_res = krequests.put(endpoint_path=API.Path.INIT_UPLOAD)
+    init_res = krequests.put(endpoint_path=API.Path.INIT_UPLOAD.value)
     krequests.raise_for_status(init_res)
     init_response = from_dict(data_class=API.InitiateUploadResponse, data=init_res.json())
     return init_response
@@ -81,7 +81,7 @@ def upload_data_frame_chunk(df_chunk: pd.DataFrame, load_uuid: str) -> None:
 def upload_image_chips(
     df: _ImageChipsDataFrame,
     path_mapper: AssetPathMapper,
-    batch_size: int = _BatchSize.UPLOAD_CHIPS,
+    batch_size: int = _BatchSize.UPLOAD_CHIPS.value,
 ) -> None:
     def upload_batch(df_batch: _ImageChipsDataFrame) -> None:
         df_batch = df_batch.reset_index(drop=True)  # reset indices so we match the signed_url indices
@@ -106,7 +106,7 @@ def as_buffer(image_raw: np.ndarray) -> io.BytesIO:
             ],
         )
         upload_response = krequests.put(
-            endpoint_path=AssetAPI.Path.BULK_UPLOAD,
+            endpoint_path=AssetAPI.Path.BULK_UPLOAD.value,
             data=data,
             headers={"Content-Type": data.content_type},
         )
@@ -157,7 +157,7 @@ def complete_load(uuid: Optional[str]) -> None:
         return
     complete_request = API.CompleteDownloadRequest(uuid=uuid)
     complete_res = krequests.put(
-        endpoint_path=API.Path.COMPLETE_DOWNLOAD,
+        endpoint_path=API.Path.COMPLETE_DOWNLOAD.value,
         data=json.dumps(dataclasses.asdict(complete_request)),
     )
     krequests.raise_for_status(complete_res)
diff --git a/kolena/_utils/instrumentation.py b/kolena/_utils/instrumentation.py
index a0b70ac75..6dd593ab4 100644
--- a/kolena/_utils/instrumentation.py
+++ b/kolena/_utils/instrumentation.py
@@ -54,7 +54,7 @@ def upload_log(message: str, status: str) -> None:
         message=message,
         status=status,
     )
-    krequests.post(endpoint_path=API.Path.UPLOAD, json=dataclasses.asdict(request))
+    krequests.post(endpoint_path=API.Path.UPLOAD.value, json=dataclasses.asdict(request))


 def log_telemetry(e: BaseException) -> None:
@@ -62,7 +62,7 @@ def log_telemetry(e: BaseException) -> None:
         stack = tb.format_stack()
         exc_format = tb.format_exception(None, e, e.__traceback__)
         combined = stack + exc_format
-        upload_log("".join(combined), DatadogLogLevels.ERROR)
+        upload_log("".join(combined), DatadogLogLevels.ERROR.value)
     except BaseException:
         """
         Attempting to upload the telemetry is best-effort. We don't want to have exceptions in that
diff --git a/kolena/_utils/repository.py b/kolena/_utils/repository.py
index 8ce23a021..17564c2fc 100644
--- a/kolena/_utils/repository.py
+++ b/kolena/_utils/repository.py
@@ -21,7 +21,7 @@
 def create(repository: str) -> None:
     response = krequests.post(
-        endpoint_path=Path.CREATE,
+        endpoint_path=Path.CREATE.value,
         data=json.dumps(dataclasses.asdict(CreateRepositoryRequest(repository=repository))),
     )
     krequests.raise_for_status(response)
diff --git a/kolena/detection/_internal/model.py b/kolena/detection/_internal/model.py
index 870017026..d37f29902 100644
--- a/kolena/detection/_internal/model.py
+++ b/kolena/detection/_internal/model.py
@@ -93,7 +93,7 @@ def __init__(self, name: str, workflow: WorkflowType, metadata: Optional[Dict[st
     def _create(cls, workflow: WorkflowType, name: str, metadata: Dict[str, Any]) -> CoreAPI.EntityData:
         log.info(f"creating new model '{name}'")
         request = CoreAPI.CreateRequest(name=name, metadata=metadata, workflow=workflow.value)
-        res = krequests.post(endpoint_path=API.Path.CREATE, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.post(endpoint_path=API.Path.CREATE.value, data=json.dumps(dataclasses.asdict(request)))
         krequests.raise_for_status(res)
         log.success(f"created new model '{name}'")
         return from_dict(data_class=CoreAPI.EntityData, data=res.json())
@@ -102,7 +102,7 @@ def _create(cls, workflow: WorkflowType, name: str, metadata: Dict[str, Any]) ->
     @validate_arguments(config=ValidatorConfig)
     def _load_by_name(cls, name: str) -> CoreAPI.EntityData:
         request = CoreAPI.LoadByNameRequest(name=name)
-        res = krequests.put(endpoint_path=API.Path.LOAD_BY_NAME, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.put(endpoint_path=API.Path.LOAD_BY_NAME.value, data=json.dumps(dataclasses.asdict(request)))
         krequests.raise_for_status(res)
         return from_dict(data_class=CoreAPI.EntityData, data=res.json())
@@ -131,7 +131,7 @@ def iter_inferences(
     def _iter_inference_batch_for_reference(
         self,
         test_object: Union[_TestCaseClass, _TestSuiteClass],
-        batch_size: int = _BatchSize.LOAD_SAMPLES,
+        batch_size: int = _BatchSize.LOAD_SAMPLES.value,
     ) -> Iterator[_LoadInferencesDataFrameClass]:
         if batch_size <= 0:
             raise InputValidationError(f"invalid batch_size '{batch_size}': expected positive integer")
@@ -143,7 +143,7 @@ def _iter_inference_batch_for_reference(
         init_request = API.InitLoadInferencesRequest(**params)
         yield from _BatchedLoader.iter_data(
             init_request=init_request,
-            endpoint_path=API.Path.INIT_LOAD_INFERENCES,
+            endpoint_path=API.Path.INIT_LOAD_INFERENCES.value,
             df_class=self._LoadInferencesDataFrameClass,
         )
         log.success(f"loaded inferences from model '{self.name}' on {test_object_display_name}")
@@ -166,7 +166,7 @@ def load_inferences_by_test_case(
     def _iter_inference_batch_for_test_suite(
         self,
         test_suite: _TestSuiteClass,
-        batch_size: int = _BatchSize.LOAD_SAMPLES,
+        batch_size: int = _BatchSize.LOAD_SAMPLES.value,
     ) -> Iterator[_LoadInferencesDataFrameClass]:
         if batch_size <= 0:
             raise InputValidationError(f"invalid batch_size '{batch_size}': expected positive integer")
@@ -175,7 +175,7 @@ def _iter_inference_batch_for_test_suite(
         init_request = API.InitLoadInferencesByTestCaseRequest(**params)
         yield from _BatchedLoader.iter_data(
             init_request=init_request,
-            endpoint_path=API.Path.INIT_LOAD_INFERENCES_BY_TEST_CASE,
+            endpoint_path=API.Path.INIT_LOAD_INFERENCES_BY_TEST_CASE.value,
             df_class=self._LoadInferencesDataFrameClass,
         )
         log.success(f"loaded inferences from model '{self.name}' on test suite '{test_suite.name}'")
diff --git a/kolena/detection/_internal/test_case.py b/kolena/detection/_internal/test_case.py
index 3def6cf96..c9aafc25b 100644
--- a/kolena/detection/_internal/test_case.py
+++ b/kolena/detection/_internal/test_case.py
@@ -128,7 +128,7 @@ def _create(
         """Create a new test case with the provided name."""
         log.info(f"creating new test case '{name}'")
         request = CoreAPI.CreateRequest(name=name, description=description or "", workflow=workflow.value)
-        res = krequests.post(endpoint_path=API.Path.CREATE, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.post(endpoint_path=API.Path.CREATE.value, data=json.dumps(dataclasses.asdict(request)))
         krequests.raise_for_status(res)
         data = from_dict(data_class=CoreAPI.EntityData, data=res.json())
         obj = cls._create_from_data(data)
@@ -142,7 +142,7 @@ def _create(
     def _load_by_name(cls, name: str, version: Optional[int] = None) -> CoreAPI.EntityData:
         """Load an existing test case with the provided name."""
         request = CoreAPI.LoadByNameRequest(name=name, version=version)
-        res = krequests.put(endpoint_path=API.Path.LOAD_BY_NAME, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.put(endpoint_path=API.Path.LOAD_BY_NAME.value, data=json.dumps(dataclasses.asdict(request)))
         krequests.raise_for_status(res)
         return from_dict(data_class=CoreAPI.EntityData, data=res.json())
@@ -173,10 +173,10 @@ def load_images(self) -> List[_TestImageClass]:
     def iter_images(self) -> Iterator[_TestImageClass]:
         """Iterate through all images with their associated ground truths in this test case."""
         log.info(f"loading test images for test case '{self.name}'")
-        init_request = CoreAPI.InitLoadContentsRequest(batch_size=_BatchSize.LOAD_SAMPLES, test_case_id=self._id)
+        init_request = CoreAPI.InitLoadContentsRequest(batch_size=_BatchSize.LOAD_SAMPLES.value, test_case_id=self._id)
         for df in _BatchedLoader.iter_data(
             init_request=init_request,
-            endpoint_path=API.Path.INIT_LOAD_IMAGES,
+            endpoint_path=API.Path.INIT_LOAD_IMAGES.value,
             df_class=self._TestImageDataFrameClass,
         ):
             for record in df.itertuples():
@@ -312,7 +312,7 @@ def edit(self, reset: bool = False) -> Iterator[Editor]:
         init_response = init_upload()
         df = self._to_data_frame(list(editor._images.values()))
         df_serialized = df.as_serializable()
-        upload_data_frame(df=df_serialized, batch_size=_BatchSize.UPLOAD_RECORDS, load_uuid=init_response.uuid)
+        upload_data_frame(df=df_serialized, batch_size=_BatchSize.UPLOAD_RECORDS.value, load_uuid=init_response.uuid)

         request = CoreAPI.CompleteEditRequest(
             test_case_id=self._id,
@@ -322,7 +322,7 @@ def edit(self, reset: bool = False) -> Iterator[Editor]:
             uuid=init_response.uuid,
         )
         complete_res = krequests.put(
-            endpoint_path=API.Path.COMPLETE_EDIT,
+            endpoint_path=API.Path.COMPLETE_EDIT.value,
             data=json.dumps(dataclasses.asdict(request)),
         )
         krequests.raise_for_status(complete_res)
diff --git a/kolena/detection/_internal/test_run.py b/kolena/detection/_internal/test_run.py
index dc361872e..e7f0786ad 100644
--- a/kolena/detection/_internal/test_run.py
+++ b/kolena/detection/_internal/test_run.py
@@ -97,7 +97,10 @@ def __init__(
             test_suite_ids=[test_suite._id],
             config=config,
         )
-        res = krequests.post(endpoint_path=API.Path.CREATE_OR_RETRIEVE, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.post(
+            endpoint_path=API.Path.CREATE_OR_RETRIEVE.value,
+            data=json.dumps(dataclasses.asdict(request)),
+        )
         krequests.raise_for_status(res)
         response = from_dict(data_class=API.CreateOrRetrieveResponse, data=res.json())
         self._id = response.test_run_id
@@ -128,7 +131,7 @@ def __exit__(
         self._submit_custom_metrics()
         self._active = False
         if exc_type is not None:
-            report_crash(self._id, API.Path.MARK_CRASHED)
+            report_crash(self._id, API.Path.MARK_CRASHED.value)

     @validate_arguments(config=ValidatorConfig)
     def add_inferences(self, image: _TestImageClass, inferences: Optional[List[_InferenceClass]]) -> None:
@@ -160,7 +163,7 @@ def add_inferences(self, image: _TestImageClass, inferences: Optional[List[_Infe
         self._inferences[image_id] = context_image_inferences

-        if self._n_inferences >= _BatchSize.UPLOAD_RESULTS:
+        if self._n_inferences >= _BatchSize.UPLOAD_RESULTS.value:
             log.info(f"uploading batch of '{self._n_inferences}' inference results")
             self._upload_chunk()
             log.success(f"uploaded batch of '{self._n_inferences}' inference results")
@@ -176,7 +179,7 @@ def iter_images(self) -> Iterator[_TestImageClass]:
             yield self._image_from_load_image_record(record)

     @validate_arguments(config=ValidatorConfig)
-    def load_images(self, batch_size: int = _BatchSize.LOAD_SAMPLES) -> List[_TestImageClass]:
+    def load_images(self, batch_size: int = _BatchSize.LOAD_SAMPLES.value) -> List[_TestImageClass]:
         """
         Returns a list of images that still need inferences evaluated, bounded in count by batch_size.
         Note that image ground truths will be excluded from the returned
@@ -195,7 +198,10 @@ def load_images(self, batch_size: int = _BatchSize.LOAD_SAMPLES) -> List[_TestIm
         return [self._image_from_load_image_record(record) for record in df_image_batch.itertuples()]

     @validate_arguments(config=ValidatorConfig)
-    def _iter_image_batch(self, batch_size: int = _BatchSize.LOAD_SAMPLES) -> Iterator[_LoadTestImagesDataFrameClass]:
+    def _iter_image_batch(
+        self,
+        batch_size: int = _BatchSize.LOAD_SAMPLES.value,
+    ) -> Iterator[_LoadTestImagesDataFrameClass]:
         if batch_size <= 0:
             raise InputValidationError(f"invalid batch_size '{batch_size}': expected positive integer")
         init_request = API.InitLoadRemainingImagesRequest(
@@ -205,7 +211,7 @@ def _iter_image_batch(self, batch_size: int = _BatchSize.LOAD_SAMPLES) -> Iterat
         )
         yield from _BatchedLoader.iter_data(
             init_request=init_request,
-            endpoint_path=API.Path.INIT_LOAD_REMAINING_IMAGES,
+            endpoint_path=API.Path.INIT_LOAD_REMAINING_IMAGES.value,
             df_class=self._LoadTestImagesDataFrameClass,
         )
@@ -239,7 +245,7 @@ def _finalize_upload(self) -> None:
         log.info("finalizing inference upload for test run")
         request = API.UploadImageResultsRequest(uuid=self._upload_uuid, test_run_id=self._id, reset=self._reset)
         finalize_res = krequests.put(
-            endpoint_path=API.Path.UPLOAD_IMAGE_RESULTS,
+            endpoint_path=API.Path.UPLOAD_IMAGE_RESULTS.value,
             data=json.dumps(dataclasses.asdict(request)),
         )
         krequests.raise_for_status(finalize_res)
@@ -289,6 +295,9 @@ def _submit_custom_metrics(self) -> None:
         log.info("submitting custom metrics for test run")
         custom_metrics = self._compute_custom_metrics()
         request = API.UpdateCustomMetricsRequest(model_id=self._model._id, metrics=custom_metrics)
-        res = krequests.put(endpoint_path=API.Path.UPLOAD_CUSTOM_METRICS, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.put(
+            endpoint_path=API.Path.UPLOAD_CUSTOM_METRICS.value,
+            data=json.dumps(dataclasses.asdict(request)),
+        )
         krequests.raise_for_status(res)
         log.success("submitted custom metrics for test run")
diff --git a/kolena/detection/_internal/test_suite.py b/kolena/detection/_internal/test_suite.py
index 798423dc9..6448debff 100644
--- a/kolena/detection/_internal/test_suite.py
+++ b/kolena/detection/_internal/test_suite.py
@@ -112,7 +112,7 @@ def _create(
         """Create a new test suite with the provided name."""
         log.info(f"creating new test suite '{name}'")
         request = CoreAPI.TestSuite.CreateRequest(name=name, description=description or "", workflow=workflow.value)
-        res = krequests.post(endpoint_path=API.Path.CREATE, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.post(endpoint_path=API.Path.CREATE.value, data=json.dumps(dataclasses.asdict(request)))
         krequests.raise_for_status(res)
         data = from_dict(data_class=CoreAPI.TestSuite.EntityData, data=res.json())
         obj = cls._create_from_data(data)
@@ -127,7 +127,7 @@ def _load_by_name(cls, name: str, version: Optional[int] = None) -> CoreAPI.Test
         """Retrieve the existing test suite with the provided name."""
         request = CoreAPI.TestSuite.LoadByNameRequest(name=name, version=version)
         data = json.dumps(dataclasses.asdict(request))
-        res = krequests.put(endpoint_path=API.Path.LOAD_BY_NAME, data=data)
+        res = krequests.put(endpoint_path=API.Path.LOAD_BY_NAME.value, data=data)
         krequests.raise_for_status(res)
         return from_dict(data_class=CoreAPI.TestSuite.EntityData, data=res.json())
@@ -298,7 +298,7 @@ def edit(self, reset: bool = False) -> Iterator[Editor]:
             test_case_ids=list(editor._test_cases.values()),
         )
         data = json.dumps(dataclasses.asdict(request))
-        res = krequests.post(endpoint_path=API.Path.EDIT, data=data)
+        res = krequests.post(endpoint_path=API.Path.EDIT.value, data=data)
         krequests.raise_for_status(res)
         log.success(f"updated test suite '{self.name}'")
         test_suite_data = from_dict(data_class=CoreAPI.TestSuite.EntityData, data=res.json())
diff --git a/kolena/detection/test_image.py b/kolena/detection/test_image.py
index 70ddc1436..0ab08aaaf 100644
--- a/kolena/detection/test_image.py
+++ b/kolena/detection/test_image.py
@@ -103,10 +103,10 @@ def iter_images(dataset: Optional[str] = None) -> Iterator[TestImage]:
     :param dataset: optionally specify the single dataset to be retrieved.
        By default, images from all datasets are returned
     """
-    init_request = API.InitLoadImagesRequest(dataset=dataset, batch_size=_BatchSize.LOAD_RECORDS)
+    init_request = API.InitLoadImagesRequest(dataset=dataset, batch_size=_BatchSize.LOAD_RECORDS.value)
     for df in _BatchedLoader.iter_data(
         init_request=init_request,
-        endpoint_path=API.Path.INIT_LOAD_IMAGES,
+        endpoint_path=API.Path.INIT_LOAD_IMAGES.value,
         df_class=TestImageDataFrame,
     ):
         for record in df.itertuples():
diff --git a/kolena/fr/model.py b/kolena/fr/model.py
index 94c22468c..f8f813b90 100644
--- a/kolena/fr/model.py
+++ b/kolena/fr/model.py
@@ -58,7 +58,7 @@ def create(cls, name: str, metadata: Dict[str, Any]) -> "Model":
         """
         log.info(f"creating model '{name}'")
         request = API.CreateRequest(name=name, metadata=metadata)
-        res = krequests.post(endpoint_path=API.Path.CREATE, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.post(endpoint_path=API.Path.CREATE.value, data=json.dumps(dataclasses.asdict(request)))
         krequests.raise_for_status(res)
         log.success(f"created model '{name}'")
         return Model.__factory__(from_dict(data_class=Model.Data, data=res.json()))
@@ -74,7 +74,7 @@ def load_by_name(cls, name: str) -> "Model":
         """
         log.info(f"loading model '{name}'")
         request = API.LoadByNameRequest(name=name)
-        res = krequests.put(endpoint_path=API.Path.LOAD_BY_NAME, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.put(endpoint_path=API.Path.LOAD_BY_NAME.value, data=json.dumps(dataclasses.asdict(request)))
         krequests.raise_for_status(res)
         log.success(f"loaded model '{name}'")
         return Model.__factory__(from_dict(data_class=Model.Data, data=res.json()))
@@ -146,7 +146,7 @@ def iter_pair_results(
         init_request = API.InitLoadPairResultsRequest(batch_size=batch_size, **base_load_request)
         yield from _BatchedLoader.iter_data(
             init_request=init_request,
-            endpoint_path=API.Path.INIT_LOAD_PAIR_RESULTS,
+            endpoint_path=API.Path.INIT_LOAD_PAIR_RESULTS.value,
             df_class=LoadedPairResultDataFrame,
         )
         log.success(f"loaded pair results from model '{self.data.name}' on {test_object_display_name}")
diff --git a/kolena/fr/test_case.py b/kolena/fr/test_case.py
index 93e21f45b..c91dbe537 100644
--- a/kolena/fr/test_case.py
+++ b/kolena/fr/test_case.py
@@ -126,7 +126,7 @@ def create(
         """
         log.info(f"creating new test case '{name}'")
         request = API.CreateRequest(name=name, description=description or "")
-        res = krequests.post(endpoint_path=API.Path.CREATE, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.post(endpoint_path=API.Path.CREATE.value, data=json.dumps(dataclasses.asdict(request)))
         krequests.raise_for_status(res)
         data = from_dict(data_class=API.EntityData, data=res.json())
         obj = cls._create_from_data(data)
@@ -164,7 +164,7 @@ def load_by_name(cls, name: str, version: Optional[int] = None) -> "TestCase":
     def _load_by_name(cls, name: str, version: Optional[int] = None) -> "TestCase":
         log.info(f"loading test case '{name}'")
         request = API.LoadByNameRequest(name=name, version=version)
-        res = krequests.put(endpoint_path=API.Path.LOAD_BY_NAME, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.put(endpoint_path=API.Path.LOAD_BY_NAME.value, data=json.dumps(dataclasses.asdict(request)))
         krequests.raise_for_status(res)
         data = from_dict(data_class=API.EntityData, data=res.json())
         log.success(f"loaded test case '{name}'")
@@ -319,7 +319,7 @@ def edit(self, reset: bool = False) -> Iterator[Editor]:
             df = pd.DataFrame(editor._samples.values(), columns=TEST_CASE_COLUMNS)
             df_validated = validate_df_schema(df, TestCaseDataFrameSchema)
-            upload_data_frame(df=df_validated, batch_size=_BatchSize.UPLOAD_RECORDS, load_uuid=init_response.uuid)
+            upload_data_frame(df=df_validated, batch_size=_BatchSize.UPLOAD_RECORDS.value, load_uuid=init_response.uuid)

             request = API.CompleteEditRequest(
                 test_case_id=self._id,
                 current_version=self.version,
@@ -328,7 +328,7 @@ def edit(self, reset: bool = False) -> Iterator[Editor]:
                 uuid=init_response.uuid,
             )
             complete_res = krequests.post(
-                endpoint_path=API.Path.COMPLETE_EDIT,
+                endpoint_path=API.Path.COMPLETE_EDIT.value,
                 data=json.dumps(dataclasses.asdict(request)),
             )
             krequests.raise_for_status(complete_res)
@@ -348,7 +348,7 @@ def iter_data(self, batch_size: int = 10_000_000) -> Iterator[TestCaseDataFrame]
         init_request = API.InitLoadDataRequest(batch_size=batch_size, test_case_id=self._id)
         yield from _BatchedLoader.iter_data(
             init_request=init_request,
-            endpoint_path=API.Path.INIT_LOAD_DATA,
+            endpoint_path=API.Path.INIT_LOAD_DATA.value,
             df_class=TestCaseDataFrame,
         )
         log.success(f"loaded image pairs in test case '{self.name}'")
diff --git a/kolena/fr/test_images.py b/kolena/fr/test_images.py
index 4d3af3aef..d5b873e39 100644
--- a/kolena/fr/test_images.py
+++ b/kolena/fr/test_images.py
@@ -184,10 +184,10 @@ def register(cls) -> Iterator[Registrar]:
             df_validated = TestImageDataFrame(validate_df_schema(df, TestImageDataFrameSchema))
             df_serializable = df_validated.as_serializable()
-            upload_data_frame(df=df_serializable, batch_size=_BatchSize.UPLOAD_RECORDS, load_uuid=init_response.uuid)
+            upload_data_frame(df=df_serializable, batch_size=_BatchSize.UPLOAD_RECORDS.value, load_uuid=init_response.uuid)

             request = LoadAPI.WithLoadUUID(uuid=init_response.uuid)
             finalize_res = krequests.put(
-                endpoint_path=API.Path.COMPLETE_REGISTER,
+                endpoint_path=API.Path.COMPLETE_REGISTER.value,
                 data=json.dumps(dataclasses.asdict(request)),
             )
             krequests.raise_for_status(finalize_res)
@@ -228,7 +228,7 @@ def iter(
         )
         yield from _BatchedLoader.iter_data(
             init_request=init_request,
-            endpoint_path=API.Path.INIT_LOAD_REQUEST,
+            endpoint_path=API.Path.INIT_LOAD_REQUEST.value,
             df_class=TestImageDataFrame,
         )
         log.success(f"loaded test images{from_extra}")
diff --git a/kolena/fr/test_run.py b/kolena/fr/test_run.py
index b349bf80d..82e62e140 100644
--- a/kolena/fr/test_run.py
+++ b/kolena/fr/test_run.py
@@ -82,7 +82,10 @@ def __init__(self, model: Model, test_suite: TestSuite, reset: bool = False):
             log.info("reset flag is disabled. update existing inferences by enabling the reset flag")
        request = API.CreateOrRetrieveRequest(model_id=model.data.id, test_suite_ids=[test_suite._id], reset=reset)
-        res = krequests.post(endpoint_path=API.Path.CREATE_OR_RETRIEVE, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.post(
+            endpoint_path=API.Path.CREATE_OR_RETRIEVE.value,
+            data=json.dumps(dataclasses.asdict(request)),
+        )
         krequests.raise_for_status(res)

         response = from_dict(data_class=TestRun.Data, data=res.json())
@@ -127,7 +130,7 @@ def load_remaining_images(self, batch_size: int = 10_000_000) -> ImageDataFrame:
             load_all=self._reset,
         )
         with krequests.put(
-            endpoint_path=API.Path.INIT_LOAD_REMAINING_IMAGES,
+            endpoint_path=API.Path.INIT_LOAD_REMAINING_IMAGES.value,
             data=json.dumps(dataclasses.asdict(init_request)),
             stream=True,
         ) as init_res:
@@ -165,7 +168,7 @@ def upload_image_results(self, df_image_result: ImageResultDataFrame) -> int:
         log.info("uploading inference results for test run")
         init_response = init_upload()

-        asset_config_res = krequests.get(endpoint_path=AssetAPI.Path.CONFIG)
+        asset_config_res = krequests.get(endpoint_path=AssetAPI.Path.CONFIG.value)
         krequests.raise_for_status(asset_config_res)
         asset_config = from_dict(data_class=AssetAPI.Config, data=asset_config_res.json())
         asset_path_mapper = AssetPathMapper(asset_config)
@@ -184,11 +187,11 @@ def upload_image_results(self, df_image_result: ImageResultDataFrame) -> int:
             df=df_validated,
             path_mapper=asset_path_mapper,
         )
-        upload_data_frame(df_result_stage, _BatchSize.UPLOAD_RECORDS, init_response.uuid)
+        upload_data_frame(df_result_stage, _BatchSize.UPLOAD_RECORDS.value, init_response.uuid)

         request = API.UploadImageResultsRequest(uuid=init_response.uuid, test_run_id=self.data.id, reset=self._reset)
         finalize_res = krequests.put(
-            endpoint_path=API.Path.COMPLETE_UPLOAD_IMAGE_RESULTS,
+            endpoint_path=API.Path.COMPLETE_UPLOAD_IMAGE_RESULTS.value,
             data=json.dumps(dataclasses.asdict(request)),
         )
         krequests.raise_for_status(finalize_res)
@@ -223,7 +226,7 @@ def load_remaining_pairs(self, batch_size: int = 10_000_000) -> Tuple[EmbeddingD
             load_all=self._reset,
         )
         with krequests.put(
-            endpoint_path=API.Path.INIT_LOAD_REMAINING_PAIRS,
+            endpoint_path=API.Path.INIT_LOAD_REMAINING_PAIRS.value,
            data=json.dumps(dataclasses.asdict(init_request)),
             stream=True,
         ) as init_res:
@@ -280,11 +283,11 @@ def upload_pair_results(self, df_pair_result: PairResultDataFrame) -> int:
         df_validated = validate_df_schema(df_pair_result, PairResultDataFrameSchema)
         validate_df_record_count(df_validated)
-        upload_data_frame(df_validated, _BatchSize.UPLOAD_RECORDS, init_response.uuid)
+        upload_data_frame(df_validated, _BatchSize.UPLOAD_RECORDS.value, init_response.uuid)

         request = API.UploadPairResultsRequest(uuid=init_response.uuid, test_run_id=self.data.id, reset=self._reset)
         finalize_res = krequests.put(
-            endpoint_path=API.Path.COMPLETE_UPLOAD_PAIR_RESULTS,
+            endpoint_path=API.Path.COMPLETE_UPLOAD_PAIR_RESULTS.value,
             data=json.dumps(dataclasses.asdict(request)),
         )
         krequests.raise_for_status(finalize_res)
@@ -339,5 +342,5 @@ def test(model: InferenceModel, test_suite: TestSuite, reset: bool = False) -> N
         log.success("completed test run")
     except Exception as e:
-        report_crash(test_run.data.id, API.Path.MARK_CRASHED)
+        report_crash(test_run.data.id, API.Path.MARK_CRASHED.value)
         raise e
diff --git a/kolena/fr/test_suite.py b/kolena/fr/test_suite.py
index 6b57d1078..929618d1a 100644
--- a/kolena/fr/test_suite.py
+++ b/kolena/fr/test_suite.py
@@ -124,7 +124,7 @@ def create(
         """
        log.info(f"creating test suite '{name}'")
         request = API.CreateRequest(name=name, description=description or "")
-        res = krequests.post(endpoint_path=API.Path.CREATE, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.post(endpoint_path=API.Path.CREATE.value, data=json.dumps(dataclasses.asdict(request)))
         krequests.raise_for_status(res)
         data = from_dict(data_class=API.EntityData, data=res.json())
         obj = cls._create_from_data(data)
@@ -162,7 +162,7 @@ def load_by_name(cls, name: str, version: Optional[int] = None) -> "TestSuite":
     def _load_by_name(cls, name: str, version: Optional[int] = None) -> "TestSuite":
         log.info(f"loading test suite '{name}'")
         request = API.LoadByNameRequest(name=name, version=version)
-        res = krequests.put(endpoint_path=API.Path.LOAD_BY_NAME, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.put(endpoint_path=API.Path.LOAD_BY_NAME.value, data=json.dumps(dataclasses.asdict(request)))
         krequests.raise_for_status(res)
         log.success(f"loaded test suite '{name}'")
         return cls._create_from_data(from_dict(data_class=API.EntityData, data=res.json()))
@@ -336,7 +336,7 @@ def edit(self, reset: bool = False) -> Iterator[Editor]:
             baseline_test_case_ids=list(editor._baseline_test_cases.values()),
             non_baseline_test_case_ids=list(editor._non_baseline_test_cases.values()),
         )
-        res = krequests.post(endpoint_path=API.Path.EDIT, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.post(endpoint_path=API.Path.EDIT.value, data=json.dumps(dataclasses.asdict(request)))
         krequests.raise_for_status(res)
         test_suite_data = from_dict(data_class=API.EntityData, data=res.json())
         self._populate_from_other(self._create_from_data(test_suite_data))
diff --git a/kolena/workflow/evaluator_function.py b/kolena/workflow/evaluator_function.py
index f30d0e945..3074d3222 100644
--- a/kolena/workflow/evaluator_function.py
+++ b/kolena/workflow/evaluator_function.py
@@ -181,7 +181,7 @@ def _update_progress(self, test_case: TestCase) -> None:
             message=message,
         )
         res = krequests.put(
-            endpoint_path=API.Path.UPDATE_METRICS_STATUS,
+            endpoint_path=API.Path.UPDATE_METRICS_STATUS.value,
             data=json.dumps(dataclasses.asdict(request)),
         )
         krequests.raise_for_status(res)
diff --git a/kolena/workflow/model.py b/kolena/workflow/model.py
index fe2273504..72ffcc738 100644
--- a/kolena/workflow/model.py
+++ b/kolena/workflow/model.py
@@ -114,7 +114,7 @@ def create(
         log.info(f"creating model '{name}'")
         metadata = metadata or {}
         request = CoreAPI.CreateRequest(name=name, metadata=metadata, workflow=cls.workflow.name)
-        res = krequests.post(endpoint_path=API.Path.CREATE, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.post(endpoint_path=API.Path.CREATE.value, data=json.dumps(dataclasses.asdict(request)))
         krequests.raise_for_status(res)
         log.success(f"created model '{name}'")
         return cls._from_data_with_infer(from_dict(data_class=CoreAPI.EntityData, data=res.json()), infer)
@@ -128,7 +128,7 @@ def load(cls, name: str, infer: Optional[Callable[[TestSample], Inference]] = No
         :param infer: optional inference function for this model.
         """
         request = CoreAPI.LoadByNameRequest(name=name)
-        res = krequests.put(endpoint_path=API.Path.LOAD, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.put(endpoint_path=API.Path.LOAD.value, data=json.dumps(dataclasses.asdict(request)))
         krequests.raise_for_status(res)
         return cls._from_data_with_infer(from_dict(data_class=CoreAPI.EntityData, data=res.json()), infer)
@@ -156,9 +156,9 @@ def iter_inferences(self, test_case: TestCase) -> Iterator[Tuple[TestSample, Gro
             init_request=API.LoadInferencesRequest(
                 model_id=self._id,
                 test_case_id=test_case._id,
-                batch_size=_BatchSize.LOAD_SAMPLES,
+                batch_size=_BatchSize.LOAD_SAMPLES.value,
             ),
-            endpoint_path=API.Path.LOAD_INFERENCES,
+            endpoint_path=API.Path.LOAD_INFERENCES.value,
             df_class=TestSampleDataFrame,
         ):
             for record in df_batch.itertuples():
diff --git a/kolena/workflow/test_case.py b/kolena/workflow/test_case.py
index 1bb06d6e4..7f1d2601b 100644
--- a/kolena/workflow/test_case.py
+++ b/kolena/workflow/test_case.py
@@ -158,7 +158,7 @@ def create(
         log.info(f"creating new test case '{name}'")
         cls._validate_test_samples(test_samples)
         request = CoreAPI.CreateRequest(name=name, description=description or "", workflow=cls.workflow.name)
-        res = krequests.post(endpoint_path=API.Path.CREATE, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.post(endpoint_path=API.Path.CREATE.value, data=json.dumps(dataclasses.asdict(request)))
         krequests.raise_for_status(res)
         data = from_dict(data_class=CoreAPI.EntityData, data=res.json())
         obj = cls._create_from_data(data)
@@ -178,7 +178,7 @@ def load(cls, name: str, version: Optional[int] = None) -> "TestCase":
         :return: the loaded test case.
         """
         request = CoreAPI.LoadByNameRequest(name=name, version=version)
-        res = krequests.put(endpoint_path=API.Path.LOAD, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.put(endpoint_path=API.Path.LOAD.value, data=json.dumps(dataclasses.asdict(request)))
         krequests.raise_for_status(res)
         data = from_dict(data_class=CoreAPI.EntityData, data=res.json())
         return cls._create_from_data(data)
@@ -192,10 +192,10 @@ def iter_test_samples(self) -> Iterator[Tuple[TestSample, GroundTruth]]:
         log.info(f"loading test samples in test case '{self.name}'")
         test_sample_type = self.workflow.test_sample_type
         ground_truth_type = self.workflow.ground_truth_type
-        init_request = CoreAPI.InitLoadContentsRequest(batch_size=_BatchSize.LOAD_SAMPLES, test_case_id=self._id)
+        init_request = CoreAPI.InitLoadContentsRequest(batch_size=_BatchSize.LOAD_SAMPLES.value, test_case_id=self._id)
         for df in _BatchedLoader.iter_data(
             init_request=init_request,
-            endpoint_path=API.Path.INIT_LOAD_TEST_SAMPLES,
+            endpoint_path=API.Path.INIT_LOAD_TEST_SAMPLES.value,
             df_class=TestSampleDataFrame,
         ):
             has_metadata = "test_sample_metadata" in df.columns
@@ -294,7 +294,7 @@ def edit(self, reset: bool = False) -> Iterator[Editor]:
         log.info(f"updating test case '{self.name}'")
         init_response = init_upload()
         df_serialized = editor._to_data_frame().as_serializable()
-        upload_data_frame(df=df_serialized, batch_size=_BatchSize.UPLOAD_RECORDS, load_uuid=init_response.uuid)
+        upload_data_frame(df=df_serialized, batch_size=_BatchSize.UPLOAD_RECORDS.value, load_uuid=init_response.uuid)

         request = CoreAPI.CompleteEditRequest(
             test_case_id=self._id,
@@ -304,7 +304,7 @@ def edit(self, reset: bool = False) -> Iterator[Editor]:
             uuid=init_response.uuid,
         )
         complete_res = krequests.put(
-            endpoint_path=API.Path.COMPLETE_EDIT,
+            endpoint_path=API.Path.COMPLETE_EDIT.value,
            data=json.dumps(dataclasses.asdict(request)),
         )
         krequests.raise_for_status(complete_res)
diff --git a/kolena/workflow/test_run.py b/kolena/workflow/test_run.py
index 327a8ff7b..f5b768797 100644
--- a/kolena/workflow/test_run.py
+++ b/kolena/workflow/test_run.py
@@ -150,7 +150,10 @@ def __init__(
             evaluator=evaluator_display_name,
             configurations=api_configurations,
         )
-        res = krequests.put(endpoint_path=API.Path.CREATE_OR_RETRIEVE, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.put(
+            endpoint_path=API.Path.CREATE_OR_RETRIEVE.value,
+            data=json.dumps(dataclasses.asdict(request)),
+        )
         krequests.raise_for_status(res)
         response = from_dict(data_class=API.CreateOrRetrieveResponse, data=res.json())
         self._id = response.test_run_id
@@ -175,7 +178,7 @@ def run(self) -> None:
             self.evaluate()
         except Exception as e:
-            report_crash(self._id, API.Path.MARK_CRASHED)
+            report_crash(self._id, API.Path.MARK_CRASHED.value)
             raise e

     def load_test_samples(self) -> List[TestSample]:
@@ -207,9 +210,9 @@ def _iter_all_inferences(self) -> Iterator[Tuple[TestSample, GroundTruth, Infere
         for df_batch in _BatchedLoader.iter_data(
             init_request=API.LoadTestSampleInferencesRequest(
                 test_run_id=self._id,
-                batch_size=_BatchSize.LOAD_SAMPLES,
+                batch_size=_BatchSize.LOAD_SAMPLES.value,
             ),
-            endpoint_path=API.Path.LOAD_INFERENCES,
+            endpoint_path=API.Path.LOAD_INFERENCES.value,
             df_class=TestSampleDataFrame,
         ):
             for record in df_batch.itertuples():
@@ -238,7 +241,10 @@ def upload_inferences(self, inferences: List[Tuple[TestSample, Inference]]) -> N
         upload_data_frame_chunk(df_serializable, init_response.uuid)

         request = API.UploadInferencesRequest(uuid=init_response.uuid, test_run_id=self._id, reset=self.reset)
-        res = krequests.put(endpoint_path=API.Path.UPLOAD_INFERENCES, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.put(
+            endpoint_path=API.Path.UPLOAD_INFERENCES.value,
+            data=json.dumps(dataclasses.asdict(request)),
+        )
         krequests.raise_for_status(res)

     def evaluate(self) -> None:
@@ -374,7 +380,10 @@ def process_results(results: Optional[EvaluationResults], config: Optional[Evalu
         log.info("uploading test suite metrics")
         self._upload_test_suite_metrics(test_suite_metrics)

-    def _iter_test_samples_batch(self, batch_size: int = _BatchSize.LOAD_SAMPLES) -> Iterator[TestSampleDataFrame]:
+    def _iter_test_samples_batch(
+        self,
+        batch_size: int = _BatchSize.LOAD_SAMPLES.value,
+    ) -> Iterator[TestSampleDataFrame]:
         if batch_size <= 0:
             raise InputValidationError(f"invalid batch_size '{batch_size}': expected positive integer")
         init_request = API.LoadRemainingTestSamplesRequest(
@@ -384,7 +393,7 @@ def _iter_test_samples_batch(self, batch_size: int = _BatchSize.LOAD_SAMPLES) ->
         )
         yield from _BatchedLoader.iter_data(
             init_request=init_request,
-            endpoint_path=API.Path.LOAD_TEST_SAMPLES,
+            endpoint_path=API.Path.LOAD_TEST_SAMPLES.value,
             df_class=TestSampleDataFrame,
         )
@@ -410,7 +419,7 @@ def _upload_test_sample_metrics(
             configuration=_maybe_evaluator_configuration_to_api(configuration),
         )
         res = krequests.put(
-            endpoint_path=API.Path.UPLOAD_TEST_SAMPLE_METRICS,
+            endpoint_path=API.Path.UPLOAD_TEST_SAMPLE_METRICS.value,
             data=json.dumps(dataclasses.asdict(request)),
         )
         krequests.raise_for_status(res)
@@ -425,7 +434,7 @@ def _upload_test_case_metrics(
             for config, tc_metrics in tc_metrics_by_config.items()
         ]
         df = pd.DataFrame(records, columns=["test_case_id", "configuration_display_name", "metrics"])
-        return self._upload_aggregate_metrics(API.Path.UPLOAD_TEST_CASE_METRICS, df)
+        return self._upload_aggregate_metrics(API.Path.UPLOAD_TEST_CASE_METRICS.value, df)

     def _upload_test_case_plots(
         self,
@@ -438,7 +447,7 @@ def _upload_test_case_plots(
             for tc_plot in tc_plots or []
         ]
         df = pd.DataFrame(records, columns=["test_case_id", "configuration_display_name", "metrics"])
-        return self._upload_aggregate_metrics(API.Path.UPLOAD_TEST_CASE_PLOTS, df)
+        return self._upload_aggregate_metrics(API.Path.UPLOAD_TEST_CASE_PLOTS.value, df)

     def _upload_test_suite_metrics(
         self,
@@ -450,7 +459,7 @@ def _upload_test_suite_metrics(
             if ts_metrics is not None
         ]
         df = pd.DataFrame(records, columns=["configuration_display_name", "metrics"])
-        return self._upload_aggregate_metrics(API.Path.UPLOAD_TEST_SUITE_METRICS, df)
+        return self._upload_aggregate_metrics(API.Path.UPLOAD_TEST_SUITE_METRICS.value, df)

     def _upload_aggregate_metrics(self, endpoint_path: str, df: pd.DataFrame) -> None:
         df_validated = MetricsDataFrame(validate_df_schema(df, MetricsDataFrameSchema, trusted=True))
@@ -469,7 +478,7 @@ def _upload_aggregate_metrics(self, endpoint_path: str, df: pd.DataFrame) -> Non
     def _start_server_side_evaluation(self) -> None:
         request = API.EvaluateRequest(test_run_id=self._id)
-        res = krequests.put(endpoint_path=API.Path.EVALUATE, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.put(endpoint_path=API.Path.EVALUATE.value, data=json.dumps(dataclasses.asdict(request)))
         krequests.raise_for_status(res)
diff --git a/kolena/workflow/test_suite.py b/kolena/workflow/test_suite.py
index 55fb5100c..e86e94d9f 100644
--- a/kolena/workflow/test_suite.py
+++ b/kolena/workflow/test_suite.py
@@ -154,7 +154,7 @@ def create(
         log.info(f"creating test suite '{name}'")
         cls._validate_test_cases(test_cases)
         request = CoreAPI.CreateRequest(name=name, description=description or "", workflow=cls.workflow.name)
-        res = krequests.post(endpoint_path=API.Path.CREATE, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.post(endpoint_path=API.Path.CREATE.value, data=json.dumps(dataclasses.asdict(request)))
         krequests.raise_for_status(res)
         data = from_dict(data_class=CoreAPI.EntityData, data=res.json())
         obj = cls._create_from_data(data)
@@ -174,7 +174,7 @@ def load(cls, name: str, version: Optional[int] = None) -> "TestSuite":
         :return: the loaded test suite.
         """
         request = CoreAPI.LoadByNameRequest(name=name, version=version)
-        res = krequests.put(endpoint_path=API.Path.LOAD, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.put(endpoint_path=API.Path.LOAD.value, data=json.dumps(dataclasses.asdict(request)))
         krequests.raise_for_status(res)
         data = from_dict(data_class=CoreAPI.EntityData, data=res.json())
         return cls._create_from_data(data)
@@ -253,7 +253,7 @@ def edit(self, reset: bool = False) -> Iterator[Editor]:
             description=editor._description,
             test_case_ids=[tc._id for tc in editor._test_cases],
         )
-        res = krequests.post(endpoint_path=API.Path.EDIT, data=json.dumps(dataclasses.asdict(request)))
+        res = krequests.post(endpoint_path=API.Path.EDIT.value, data=json.dumps(dataclasses.asdict(request)))
         krequests.raise_for_status(res)
         test_suite_data = from_dict(data_class=CoreAPI.EntityData, data=res.json())
         self._populate_from_other(self._create_from_data(test_suite_data))
@@ -264,9 +264,9 @@ def load_test_samples(self) -> List[Tuple[TestCase, List[TestSample]]]:
         for df_batch in _BatchedLoader.iter_data(
             init_request=API.LoadTestSamplesRequest(
                 test_suite_id=self._id,
-                batch_size=_BatchSize.LOAD_SAMPLES,
+                batch_size=_BatchSize.LOAD_SAMPLES.value,
             ),
-            endpoint_path=API.Path.INIT_LOAD_TEST_SAMPLES,
+            endpoint_path=API.Path.INIT_LOAD_TEST_SAMPLES.value,
             df_class=TestSuiteTestSamplesDataFrame,
         ):
             for record in df_batch.itertuples():
diff --git a/kolena/workflow/workflow.py b/kolena/workflow/workflow.py
index 831225d14..bcaa07f6c 100644
--- a/kolena/workflow/workflow.py
+++ b/kolena/workflow/workflow.py
@@ -202,7 +202,7 @@ def register_evaluator(
     """
     response = krequests.post(
-        API.Path.EVALUATOR,
+        API.Path.EVALUATOR.value,
        json=dict(workflow=workflow, image=image, name=evaluator_name, secret=secret, aws_assume_role=aws_assume_role),
     )
     krequests.raise_for_status(response)
@@ -222,7 +222,7 @@ def list_evaluators(workflow: str) -> List[RemoteEvaluator]:
     :return: list of registered evaluators
     """
-    response = krequests.get(f"{API.Path.EVALUATOR}/{quote(workflow)}")
+    response = krequests.get(f"{API.Path.EVALUATOR.value}/{quote(workflow)}")
     krequests.raise_for_status(response)

     return [
@@ -243,8 +243,8 @@ def get_evaluator(workflow: str, evaluator_name: str, include_secret: bool = Fal
     """
     response = krequests.get(
-        f"{API.Path.EVALUATOR}/{quote(workflow)}/{quote(evaluator_name)}",
-        params={"include_secret": include_secret},
+        endpoint_path=f"{API.Path.EVALUATOR.value}/{quote(workflow)}/{quote(evaluator_name)}",
+        params=dict(include_secret=include_secret),
     )
     krequests.raise_for_status(response)
diff --git a/tests/integration/fr/test_test_run.py b/tests/integration/fr/test_test_run.py
index 13cb6ba7d..a8760d20d 100644
--- a/tests/integration/fr/test_test_run.py
+++ b/tests/integration/fr/test_test_run.py
@@ -378,7 +378,7 @@ def extract(locator: str) -> np.ndarray:
     with pytest.raises(RuntimeError):
         test(model, test_suites[0])
-    patched.assert_called_once_with(test_run._id, TestRunAPI.Path.MARK_CRASHED)
+    patched.assert_called_once_with(test_run._id, TestRunAPI.Path.MARK_CRASHED.value)


 def test__multi_face(fr_multi_face_test_run: Tuple[Model, TestSuite]) -> None:
diff --git a/tests/integration/generic/test_test_run.py b/tests/integration/generic/test_test_run.py
index 2f70a0eb3..05ff9b3ff 100644
--- a/tests/integration/generic/test_test_run.py
+++ b/tests/integration/generic/test_test_run.py
@@ -207,7 +207,7 @@ class MarkCrashedDummyEvaluator(DummyEvaluator):
     with pytest.raises(RuntimeError):
         test_run.run()
-    patched.assert_called_once_with(test_run._id, TestRunAPI.Path.MARK_CRASHED)
+    patched.assert_called_once_with(test_run._id, TestRunAPI.Path.MARK_CRASHED.value)


 def test__evaluator__unconfigured(
diff --git a/tests/unit/utils/test_endpoints.py b/tests/unit/utils/test_endpoints.py
index b6c9f6a35..8e9f6889f 100644
--- a/tests/unit/utils/test_endpoints.py
+++ b/tests/unit/utils/test_endpoints.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from typing import Union
 from urllib.parse import urlparse

 import pytest
@@ -20,7 +19,6 @@
 from kolena._utils.endpoints import _get_platform_url
 from kolena._utils.endpoints import _get_results_url
 from kolena._utils.state import _ClientState
-from kolena.workflow import Workflow


 def assert_url_equals(a: str, b: str) -> None:
@@ -91,5 +89,5 @@ def test__get_platform_url(client_state: _ClientState, expected: str) -> None:
         ),
     ],
 )
-def test__get_results_url(client_state: _ClientState, workflow: Union[Workflow, WorkflowType], expected: str) -> None:
+def test__get_results_url(client_state: _ClientState, workflow: str, expected: str) -> None:
     assert_url_equals(_get_results_url(client_state, workflow, 1, 2), expected)