diff --git a/.pyproject_generation/pyproject_custom.toml b/.pyproject_generation/pyproject_custom.toml
index d738bdb..dafdbd1 100644
--- a/.pyproject_generation/pyproject_custom.toml
+++ b/.pyproject_generation/pyproject_custom.toml
@@ -1,6 +1,6 @@
 [project]
 name = "ghga_connector"
-version = "1.5.2"
+version = "1.5.1"
 description = "GHGA Connector - A CLI client application for interacting with the GHGA system."
 dependencies = [
     "typer~=0.12",
diff --git a/README.md b/README.md
index 881f4e2..99ee003 100644
--- a/README.md
+++ b/README.md
@@ -26,13 +26,13 @@ We recommend using the provided Docker container.
 
 A pre-build version is available at [docker hub](https://hub.docker.com/repository/docker/ghga/ghga-connector):
 ```bash
-docker pull ghga/ghga-connector:1.5.2
+docker pull ghga/ghga-connector:1.5.1
 ```
 
 Or you can build the container yourself from the [`./Dockerfile`](./Dockerfile):
 ```bash
 # Execute in the repo's root dir:
-docker build -t ghga/ghga-connector:1.5.2 .
+docker build -t ghga/ghga-connector:1.5.1 .
 ```
 
 For production-ready deployment, we recommend using Kubernetes, however,
@@ -40,7 +40,7 @@ for simple use cases, you could execute the service using docker
 on a single server:
 ```bash
 # The entrypoint is preconfigured:
-docker run -p 8080:8080 ghga/ghga-connector:1.5.2 --help
+docker run -p 8080:8080 ghga/ghga-connector:1.5.1 --help
 ```
 
 If you prefer not to use containers, you may install the service from source:
diff --git a/pyproject.toml b/pyproject.toml
index c02166e..54f6027 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -21,7 +21,7 @@ classifiers = [
     "Intended Audience :: Developers",
 ]
 name = "ghga_connector"
-version = "1.5.2"
+version = "1.5.1"
 description = "GHGA Connector - A CLI client application for interacting with the GHGA system."
 dependencies = [
     "typer~=0.12",
diff --git a/src/ghga_connector/cli.py b/src/ghga_connector/cli.py
index a5b0f66..a631566 100644
--- a/src/ghga_connector/cli.py
+++ b/src/ghga_connector/cli.py
@@ -262,12 +262,17 @@ def download(
     """Wrapper for the async download function"""
     asyncio.run(
         async_download(
-            output_dir, my_public_key_path, my_private_key_path, debug, overwrite
+            output_dir=output_dir,
+            my_public_key_path=my_public_key_path,
+            my_private_key_path=my_private_key_path,
+            debug=debug,
+            overwrite=overwrite,
         )
     )
 
 
 async def async_download(
+    *,
     output_dir: Path,
     my_public_key_path: Path,
     my_private_key_path: Path,
diff --git a/src/ghga_connector/core/downloading/batch_processing.py b/src/ghga_connector/core/downloading/batch_processing.py
index 615b07f..c4b430a 100644
--- a/src/ghga_connector/core/downloading/batch_processing.py
+++ b/src/ghga_connector/core/downloading/batch_processing.py
@@ -17,7 +17,7 @@
 from abc import ABC, abstractmethod
 from dataclasses import dataclass, field
 from pathlib import Path
-from time import sleep, time
+from time import perf_counter, sleep
 
 import httpx
 
@@ -158,7 +158,7 @@ def __init__(  # noqa: PLR0913
         self.work_package_accessor = work_package_accessor
         self.max_wait_time = config.max_wait_time
         self.client = client
-        self.started_waiting = now = time()
+        self.started_waiting = now = perf_counter()
 
         # Successfully staged files with their download URLs and sizes
         # in the beginning, consider all files as staged with a retry time of 0
@@ -183,10 +183,10 @@ async def get_staged_files(self) -> dict[str, URLResponse]:
             self.message_display.display("Updating list of staged files...")
             staging_items = list(self.unstaged_retry_times.items())
             for file_id, retry_time in staging_items:
-                if time() >= retry_time:
+                if perf_counter() >= retry_time:
                     await self._check_file(file_id=file_id)
                 if len(self.staged_urls.items()) > 0:
-                    self.started_waiting = time()  # reset wait timer
+                    self.started_waiting = perf_counter()  # reset wait timer
                     break
             if not self.staged_urls and not self._handle_failures():
                 sleep(1)
@@ -223,7 +223,7 @@ async def _check_file(self, file_id: str) -> None:
             self.staged_urls[file_id] = response
             self.message_display.display(f"File {file_id} is ready for download.")
         elif isinstance(response, RetryResponse):
-            self.unstaged_retry_times[file_id] = time() + response.retry_after
+            self.unstaged_retry_times[file_id] = perf_counter() + response.retry_after
             self.message_display.display(f"File {file_id} is (still) being staged.")
         else:
             self.missing_files.append(file_id)
@@ -233,7 +233,7 @@ def _check_timeout(self):
 
         In that cases, a MaxWaitTimeExceededError is raised.
         """
-        if time() - self.started_waiting >= self.max_wait_time:
+        if perf_counter() - self.started_waiting >= self.max_wait_time:
             raise exceptions.MaxWaitTimeExceededError(max_wait_time=self.max_wait_time)
 
     def _handle_failures(self) -> bool:
@@ -256,6 +256,6 @@ def _handle_failures(self) -> bool:
             response = self.io_handler.get_input(message=unknown_ids_present)
             self.io_handler.handle_response(response=response)
             self.message_display.display("Downloading remaining files")
-            self.started_waiting = time()  # reset the timer
+            self.started_waiting = perf_counter()  # reset the timer
             self.missing_files = []  # reset list of missing files
             return True
diff --git a/tests/integration/test_cli.py b/tests/integration/test_cli.py
index fd0787a..90fc1f9 100644
--- a/tests/integration/test_cli.py
+++ b/tests/integration/test_cli.py
@@ -29,9 +29,6 @@
 import crypt4gh.keys
 import httpx
 import pytest
-from ghga_service_commons.api.mock_router import (  # noqa: F401
-    assert_all_responses_were_requested,
-)
 from ghga_service_commons.utils.temp_files import big_temp_file
 from pytest_httpx import HTTPXMock, httpx_mock  # noqa: F401
 