Commit 4ba7f59 (parent: 72589ed)
Showing 3 changed files with 1 addition and 99 deletions.
First changed file (one line added):

@@ -0,0 +1 @@
+Script to download data set from FAOSTAT website.
Second changed file (commented-out implementation removed):

@@ -1,65 +1,8 @@
 """Downloads all domain data sets from FAOSTAT website."""
 
 from faostat_data_primap.download import (
     download_all_domains,
 )
 
-# def download_all_domains(
-#     domains: list[tuple[str]] = domains,
-#     downloaded_data_path: str = downloaded_data_path,
-# ) -> list[str]:
-#     """
-#     Download and unpack all climate-related domains from the FAO stat website.
-#
-#     Extract the date when the data set was last updated and create a directory
-#     with the same name. Download the zip files for each domain if
-#     it does not already exist. Unpack the zip file and save in
-#     the same directory.
-#
-#     Parameters
-#     ----------
-#     sources
-#         Name of data set, url to domain overview,
-#         and download url
-#
-#     Returns
-#     -------
-#     List of input files that have been fetched or found locally.
-#
-#     """
-#     downloaded_files = []
-#     for ds_name, urls in domains.items():
-#         url = urls["url_domain"]
-#         url_download = urls["url_download"]
-#         url_methodology = urls["url_methodology"]
-#
-#         soup = get_html_content(url)
-#
-#         last_updated = get_last_updated_date(soup, url)
-#
-#         if not downloaded_data_path.exists():
-#             downloaded_data_path.mkdir()
-#
-#         ds_path = downloaded_data_path / ds_name
-#         if not ds_path.exists():
-#             ds_path.mkdir()
-#
-#         local_data_dir = ds_path / last_updated
-#         if not local_data_dir.exists():
-#             local_data_dir.mkdir()
-#
-#         download_methodology(save_path=local_data_dir, url_download=url_methodology)
-#
-#         local_filename = local_data_dir / f"{ds_name}.zip"
-#
-#         download_file(url_download=url_download, save_path=local_filename)
-#
-#         downloaded_files.append(str(local_filename))
-#
-#         unzip_file(local_filename)
-#
-#     return downloaded_files
-
 if __name__ == "__main__":
     download_all_domains()
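For context, the kept lines of the hunk above imply that after this commit the script reduces to a thin entry point. The following is a reconstruction from the diff's context lines, not the rendered file (the filename itself is not shown in this view):

"""Downloads all domain data sets from FAOSTAT website."""

from faostat_data_primap.download import (
    download_all_domains,
)

if __name__ == "__main__":
    download_all_domains()

The commented-out implementation was safe to delete: the script already imports and calls download_all_domains from the faostat_data_primap.download module, so the dead comment block duplicated logic that now lives in the package.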
Third changed file: deleted (contents not shown in this view).