diff --git a/src/swell/__init__.py b/src/swell/__init__.py index 6555fecc..8e89cac8 100644 --- a/src/swell/__init__.py +++ b/src/swell/__init__.py @@ -9,4 +9,4 @@ repo_directory = os.path.dirname(__file__) # Set the version for swell -__version__ = '1.9.0' +__version__ = '1.9.5' diff --git a/src/swell/configuration/jedi/interfaces/geos_atmosphere/suite_questions.yaml b/src/swell/configuration/jedi/interfaces/geos_atmosphere/suite_questions.yaml index 9db1cd1b..f3fc1d66 100644 --- a/src/swell/configuration/jedi/interfaces/geos_atmosphere/suite_questions.yaml +++ b/src/swell/configuration/jedi/interfaces/geos_atmosphere/suite_questions.yaml @@ -1,3 +1,9 @@ cycle_times: default_value: ['T00', 'T06', 'T12', 'T18'] options: ['T00', 'T06', 'T12', 'T18'] + +ensemble_hofx_strategy: + default_value: 'serial' + +ensemble_hofx_packets: + default_value: 1 diff --git a/src/swell/configuration/jedi/interfaces/geos_atmosphere/task_questions.yaml b/src/swell/configuration/jedi/interfaces/geos_atmosphere/task_questions.yaml index 5bdd1ce9..92e50162 100644 --- a/src/swell/configuration/jedi/interfaces/geos_atmosphere/task_questions.yaml +++ b/src/swell/configuration/jedi/interfaces/geos_atmosphere/task_questions.yaml @@ -84,6 +84,7 @@ horizontal_localization_method: horizontal_resolution: default_value: '361' options: + - '91' - '361' jedi_bkg_filename_template: diff --git a/src/swell/configuration/jedi/interfaces/geos_ocean/suite_questions.yaml b/src/swell/configuration/jedi/interfaces/geos_ocean/suite_questions.yaml index f29c4a4c..2bf14f85 100644 --- a/src/swell/configuration/jedi/interfaces/geos_ocean/suite_questions.yaml +++ b/src/swell/configuration/jedi/interfaces/geos_ocean/suite_questions.yaml @@ -1,3 +1,9 @@ cycle_times: default_value: ['T00', 'T12'] options: ['T00', 'T12'] + +ensemble_hofx_strategy: + default_value: 'serial' + +ensemble_hofx_packets: + default_value: 1 diff --git a/src/swell/configuration/jedi/interfaces/geos_ocean/task_questions.yaml 
b/src/swell/configuration/jedi/interfaces/geos_ocean/task_questions.yaml index 58283628..90ff2efc 100644 --- a/src/swell/configuration/jedi/interfaces/geos_ocean/task_questions.yaml +++ b/src/swell/configuration/jedi/interfaces/geos_ocean/task_questions.yaml @@ -1,5 +1,7 @@ analysis_forecast_window_offset: default_value: -PT6H + options: + - -PT12H analysis_variables: default_value: @@ -7,6 +9,11 @@ analysis_variables: - tocn - ssh - hocn + options: + - socn + - tocn + - ssh + - hocn background_error_model: default_value: explicit_diffusion @@ -21,25 +28,28 @@ background_frequency: background_time_offset: default_value: PT9H + options: + - PT9H clean_patterns: default_value: - '*.nc4' - '*.txt' - '*MOM*' + options: - logfile.*.out - background_error_model/*.nc + - '*.nc4' + - '*.txt' + - '*MOM*' gradient_norm_reduction: default_value: 1e-10 -gsibec_configuration: - default_value: - - None - horizontal_resolution: default_value: 1440x1080 options: + - 72x36 - 360x320 - 1440x1080 @@ -78,6 +88,16 @@ observations: - adt - insitus - insitut + - adt_3a_egm2008 + - adt_3b_egm2008 + - adt_c2_egm2008 + - adt_j3_egm2008 + - adt_sa_egm2008 + - salt_profile_fnmoc + - sss_smos_esa + - sst_gmi_l3u + - sst_ship_fnmoc + - temp_profile_fnmoc total_processors: default_value: 24 @@ -85,6 +105,7 @@ total_processors: vertical_resolution: default_value: '75' options: + - '50' - '75' window_length: diff --git a/src/swell/deployment/create_experiment.py b/src/swell/deployment/create_experiment.py index 781048ee..a08c3138 100644 --- a/src/swell/deployment/create_experiment.py +++ b/src/swell/deployment/create_experiment.py @@ -10,12 +10,13 @@ import copy import datetime -import importlib import os import shutil import sys import yaml +from swell.deployment.prepare_config_and_suite.prepare_config_and_suite import \ + PrepareExperimentConfigAndSuite from swell.swell_path import get_swell_path from swell.utilities.dictionary import add_comments_to_dictionary, dict_get from 
swell.utilities.jinja2 import template_string_jinja2 @@ -64,10 +65,6 @@ def prepare_config(suite, method, platform, override, advanced, slurm): # --------------- logger = Logger('SwellPrepSuiteConfig') - # Starting point for configuration generation - # ------------------------------------------- - config_file = os.path.join(get_swell_path(), 'suites', 'suite_questions.yaml') - # Assert valid method # ------------------- valid_tasks = ['defaults', 'cli'] @@ -77,24 +74,27 @@ def prepare_config(suite, method, platform, override, advanced, slurm): # Set the object that will be used to populate dictionary options # --------------------------------------------------------------- - PrepUsing = getattr(importlib.import_module('swell.deployment.prep_config_'+method), - 'PrepConfig'+method.capitalize()) - prep_using = PrepUsing(logger, config_file, suite, platform, override, advanced) - - # Call the config prep step - # ------------------------- - prep_using.execute() - - # Copy the experiment dictionary - # ------------------------------ - experiment_dict = prep_using.experiment_dict - comment_dict = prep_using.comment_dict + prepare_config_and_suite = PrepareExperimentConfigAndSuite(logger, suite, platform, + method, override) + # Ask questions as the suite gets configured + # ------------------------------------------ + experiment_dict, comment_dict = prepare_config_and_suite.ask_questions_and_configure_suite() # Add the datetime to the dictionary # ---------------------------------- experiment_dict['datetime_created'] = datetime.datetime.today().strftime("%Y%m%d_%H%M%SZ") comment_dict['datetime_created'] = 'Datetime this file was created (auto added)' + # Add the platform the dictionary + # ------------------------------- + experiment_dict['platform'] = platform + comment_dict['platform'] = 'Computing platform to run the experiment' + + # Add the suite_to_run to the dictionary + # -------------------------------------- + experiment_dict['suite_to_run'] = suite + 
comment_dict['suite_to_run'] = 'Record of the suite being executed' + # Add the model components to the dictionary # ------------------------------------------ if 'models' in experiment_dict: @@ -131,7 +131,7 @@ def prepare_config(suite, method, platform, override, advanced, slurm): # -------------------------- experiment_dict_string = yaml.dump(experiment_dict, default_flow_style=False, sort_keys=False) - experiment_dict_string_comments = add_comments_to_dictionary(experiment_dict_string, + experiment_dict_string_comments = add_comments_to_dictionary(logger, experiment_dict_string, comment_dict) # Return path to dictionary file @@ -142,22 +142,24 @@ def prepare_config(suite, method, platform, override, advanced, slurm): # -------------------------------------------------------------------------------------------------- -def create_experiment_directory(experiment_dict_str): +def create_experiment_directory(suite, method, platform, override, advanced, slurm): # Create a logger # --------------- logger = Logger('SwellCreateExperiment') + # Call the experiment config and suite generation + # ------------------------------------------------ + experiment_dict_str = prepare_config(suite, method, platform, override, advanced, slurm) + # Load the string using yaml # -------------------------- experiment_dict = yaml.safe_load(experiment_dict_str) - # Extract from the config - # ----------------------- + # Experiment ID and root from the user input + # ------------------------------------------ experiment_id = dict_get(logger, experiment_dict, 'experiment_id') experiment_root = dict_get(logger, experiment_dict, 'experiment_root') - platform = dict_get(logger, experiment_dict, 'platform', None) - suite_to_run = dict_get(logger, experiment_dict, 'suite_to_run') # Write out some info # ------------------- @@ -175,42 +177,26 @@ def create_experiment_directory(experiment_dict_str): with open(os.path.join(exp_suite_path, 'experiment.yaml'), 'w') as file: 
file.write(experiment_dict_str) + # At this point we need to write the complete suite file with all templates resolved. Call the + # function to build the scheduling dictionary, combine with the experiment dictionary, + # resolve the templates and write the suite file to the experiment suite directory. + # -------------------------------------------------------------------------------------------- + swell_suite_path = os.path.join(get_swell_path(), 'suites', suite) + prepare_cylc_suite_jinja2(logger, swell_suite_path, exp_suite_path, experiment_dict) + # Copy suite and platform files to experiment suite directory # ----------------------------------------------------------- - swell_suite_path = os.path.join(get_swell_path(), 'suites', suite_to_run) + swell_suite_path = os.path.join(get_swell_path(), 'suites', suite) copy_platform_files(logger, exp_suite_path, platform) if os.path.exists(os.path.join(swell_suite_path, 'eva')): copy_eva_files(logger, swell_suite_path, exp_suite_path) - # Create R2D2 database file - # ------------------------- - r2d2_local_path = dict_get(logger, experiment_dict, 'r2d2_local_path', None) - if r2d2_local_path is not None: - r2d2_conf_path = os.path.join(exp_suite_path, 'r2d2_config.yaml') - - # Write R2D2_CONFIG to modules - with open(os.path.join(exp_suite_path, 'modules'), 'a') as module_file: - module_file.write(f'export R2D2_CONFIG={r2d2_conf_path}') - - # Open the r2d2 file to dictionary - with open(r2d2_conf_path, 'r') as r2d2_file_open: - r2d2_file_str = r2d2_file_open.read() - r2d2_file_str = template_string_jinja2(logger, r2d2_file_str, experiment_dict) - r2d2_file_str = os.path.expandvars(r2d2_file_str) - - with open(r2d2_conf_path, 'w') as r2d2_file_open: - r2d2_file_open.write(r2d2_file_str) - # Set the swell paths in the modules file and create csh versions # --------------------------------------------------------------- template_modules_file(logger, experiment_dict, exp_suite_path) create_modules_csh(logger, 
exp_suite_path) - # Set the jinja2 file for cylc - # ---------------------------- - prepare_cylc_suite_jinja2(logger, swell_suite_path, exp_suite_path, experiment_dict) - # Copy config directory to experiment # ----------------------------------- src = os.path.join(get_swell_path(), 'configuration') @@ -258,7 +244,7 @@ def copy_platform_files(logger, exp_suite_path, platform=None): swell_lib_path = get_swell_path() platform_path = os.path.join(swell_lib_path, 'deployment', 'platforms', platform) - for s in ['modules', 'r2d2_config.yaml']: + for s in ['modules']: src_file = os.path.split(s)[1] src_path_file = os.path.join(platform_path, os.path.split(s)[0], src_file) dst_path_file = os.path.join(exp_suite_path, '{}'.format(src_file)) @@ -380,23 +366,7 @@ def prepare_cylc_suite_jinja2(logger, swell_suite_path, exp_suite_path, experime # Copy the experiment dictionary to the rendering dictionary # ---------------------------------------------------------- - render_dictionary = {} - - # Elements to copy from the experiment dictionary - # ----------------------------------------------- - render_elements = [ - 'start_cycle_point', - 'final_cycle_point', - 'runahead_limit', - 'model_components', - 'platform', - ] - - # Copy elements from experiment dictionary to render dictionary - # ------------------------------------------------------------- - for element in render_elements: - if element in experiment_dict: - render_dictionary[element] = experiment_dict[element] + render_dictionary = copy.deepcopy(experiment_dict) # Get unique list of cycle times with model flags to render dictionary # -------------------------------------------------------------------- @@ -469,7 +439,7 @@ def prepare_cylc_suite_jinja2(logger, swell_suite_path, exp_suite_path, experime # Render the template # ------------------- - new_suite_file = template_string_jinja2(logger, suite_file, render_dictionary) + new_suite_file = template_string_jinja2(logger, suite_file, render_dictionary, False) # Write 
suite file to experiment # ------------------------------ diff --git a/src/swell/deployment/platforms/generic/suite_questions.yaml b/src/swell/deployment/platforms/generic/suite_questions.yaml index 33892d3a..dfc5f441 100644 --- a/src/swell/deployment/platforms/generic/suite_questions.yaml +++ b/src/swell/deployment/platforms/generic/suite_questions.yaml @@ -1,8 +1,2 @@ -experiment_id: - default_value: swell-{{suite_to_run}} - experiment_root: default_value: $HOME/SwellExperiments - -r2d2_local_path: - default_value: $HOME/R2D2DataStore/Local diff --git a/src/swell/deployment/platforms/generic/task_questions.yaml b/src/swell/deployment/platforms/generic/task_questions.yaml index 6bb85dac..a47bf13d 100644 --- a/src/swell/deployment/platforms/generic/task_questions.yaml +++ b/src/swell/deployment/platforms/generic/task_questions.yaml @@ -21,6 +21,9 @@ geos_restarts_directory: default_value: - /home/geos/restarts +r2d2_local_path: + default_value: $HOME/R2D2DataStore/Local + swell_static_files: default_value: This would need to be provided by user diff --git a/src/swell/deployment/platforms/nccs_discover/suite_questions.yaml b/src/swell/deployment/platforms/nccs_discover/suite_questions.yaml index 5344ca69..43a7c260 100644 --- a/src/swell/deployment/platforms/nccs_discover/suite_questions.yaml +++ b/src/swell/deployment/platforms/nccs_discover/suite_questions.yaml @@ -1,5 +1,2 @@ experiment_root: default_value: /discover/nobackup/${USER}/SwellExperiments - -r2d2_local_path: - default_value: /discover/nobackup/${USER}/R2D2DataStore/Local diff --git a/src/swell/deployment/platforms/nccs_discover/task_questions.yaml b/src/swell/deployment/platforms/nccs_discover/task_questions.yaml index 2ff402c3..46e69bcb 100644 --- a/src/swell/deployment/platforms/nccs_discover/task_questions.yaml +++ b/src/swell/deployment/platforms/nccs_discover/task_questions.yaml @@ -19,6 +19,9 @@ geos_experiment_directory: geos_restarts_directory: default_value: restarts_20210601_030000 
+r2d2_local_path: + default_value: /discover/nobackup/${USER}/R2D2DataStore/Local + swell_static_files: default_value: /discover/nobackup/projects/gmao/advda/SwellStaticFiles diff --git a/src/swell/deployment/prep_config_base.py b/src/swell/deployment/prep_config_base.py deleted file mode 100644 index 69d9c427..00000000 --- a/src/swell/deployment/prep_config_base.py +++ /dev/null @@ -1,606 +0,0 @@ -# (C) Copyright 2021- United States Government as represented by the Administrator of the -# National Aeronautics and Space Administration. All Rights Reserved. -# -# This software is licensed under the terms of the Apache Licence Version 2.0 -# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. - - -# -------------------------------------------------------------------------------------------------- - - -from abc import ABC, abstractmethod -import copy -import datetime -import os -import yaml - -from swell.swell_path import get_swell_path - - -# -------------------------------------------------------------------------------------------------- - - -class PrepConfigBase(ABC): - - def __init__(self, logger, dictionary_file, suite, platform, override, advanced): - - self.override = override - - self.show_advanced = advanced - - # Store a logger for all to use - self.logger = logger - - # Store the name of the class inheriting base - self.prep_using = type(self).__name__.split('PrepConfig')[1] - - # Swell install path - swell_path = get_swell_path() - self.install_path = swell_path - - # Get the path and filename of the suite dictionary - self.directory = os.path.join(swell_path, 'suites') - self.filename = os.path.splitext(os.path.basename(dictionary_file))[0] - - # Keep track of the model, atmosphere, ocean etc - self.model_path = os.path.join(self.install_path, - 'configuration', - 'jedi/interfaces/') - self.model = None - self.model_flag = False - self.default_models = ['geos_atmosphere', 'geos_ocean'] - self.selected_models = None - - # Create 
executable keys list - self.exec_keys = [] - - # Experiment dictionary to be created and used in swell - self.experiment_dict = {} - - # Comment dictionary to be created and used to add comments to config file - self.comment_dict = {} - - # Dictionary validation things - self.valid_types = ['string', 'integer', 'float', - 'iso-datetime', 'iso-duration', - 'string-list', 'integer-list', - 'string-drop-list', 'string-check-list', - 'boolean'] - - # Disallowed element types - self.dis_elem_types = [datetime.datetime, datetime.date] - - # Track the suite and platform that the user may input through the prepare_config path - self.suite_to_run = suite - self.platform = platform - - # Open the platform specific defaults - platform_dict_file = os.path.join(swell_path, 'deployment', 'platforms', platform, - 'suite_questions.yaml') - with open(platform_dict_file, 'r') as platform_dict_file_open: - platform_dict_str = platform_dict_file_open.read() - - platform_task_file = os.path.join(swell_path, 'deployment', 'platforms', platform, - 'task_questions.yaml') - with open(platform_task_file, 'r') as platform_dict_file_open: - platform_dict_str = platform_dict_str + platform_dict_file_open.read() - - # Dictionary of defaults for platform - # Use for 'defer_to_platform' default - self.platform_dictionary = yaml.safe_load(platform_dict_str) - - # Open Master ask questions yaml - with open(os.path.join(swell_path, 'tasks', 'task_questions.yaml'), 'r') as ymlfile: - self.all_task_questions = yaml.safe_load(ymlfile) - - # Open tests override dictionary - # do this only on defaults - test_file = os.path.join(swell_path, 'test', 'suite_tests', suite + '-tier1.yaml') - - if os.path.exists(test_file): - with open(test_file, 'r') as test_yml: - self.test_dictionary = yaml.safe_load(test_yml) - else: - self.test_dictionary = {} - - # Open user selected override dictionary - if override is not None: - - logger.info(f'Overriding experiment dictionary settings using override dictionary') - - 
# If override is a dictionary then use it directly - if isinstance(override, dict): - self.override_dictionary = override - elif isinstance(override, str): - # If override is a string then assume it is a path to a yaml file - with open(override, 'r') as select_override_yml: - self.override_dictionary = yaml.safe_load(select_override_yml) - else: - logger.abort(f'Override must be a dictionary or a path to a yaml file. ' + - f'Instead it is {type(override)}') - - self.override_models() - - # ---------------------------------------------------------------------------------------------- - - def execute(self): - # Open the suite_to_run selected - # check_widgets goes in cli - # defaults gets nothing - - if self.prep_using == 'Cli': - self.logger.info("Please answer the following questions to generate your experiment " + - "configuration YAML file.\n") - - # Set current dictionary variable which is needed for answer changes - self.current_dictionary = {} - - # Generate task lists and question dictionaries - base_tasks, model_tasks = self.suite_setup() - - # Add task questions to base question dictionary - base_tasks_question_dict = self.task_dictionary_comber(base_tasks) - self.base_questions_dictionary.update(base_tasks_question_dict) - - # Base model questions (i.e. 
questions sent to model tasks that do not depend on model) - model_task_base_questions_tmp = self.task_dictionary_comber(model_tasks) - - model_task_base_questions = {} - for key in model_task_base_questions_tmp.keys(): - if 'models' not in model_task_base_questions_tmp[key].keys(): - model_task_base_questions[key] = model_task_base_questions_tmp[key] - - # Add to the base questions - self.base_questions_dictionary.update(model_task_base_questions) - - # Iterate over base questions - for k, v in self.base_questions_dictionary.items(): - self.key_passer(k, v) - - if self.model_flag: - # Find out what model components are to be used in the config - self.selected_models = self.get_models() - - # Generate default model answer dictionary - self.get_model_defaults() - - # Add tasks questions to model question dictionary - self.model_questions_dictionary = {} - model_tasks_question_dict = self.model_task_comber(model_tasks) - self.model_questions_dictionary.update(model_tasks_question_dict) - - # Prepend model suite question in front of model task questions - self.prepend_model_dict() - - model_questions_dictionary_copy = copy.deepcopy(self.model_questions_dictionary) - - # Iterate over base questions - for m in self.selected_models: - self.model_questions_dictionary[m] = \ - copy.deepcopy(model_questions_dictionary_copy[m]) - copy.deepcopy(model_questions_dictionary_copy[m]) - self.model = m - for k, v in self.model_questions_dictionary[m].items(): - self.key_passer(k, v) - - return - - # ---------------------------------------------------------------------------------------------- - - def suite_setup(self): - - # Create dictionary for asking about exp id and root - exp_questions = { - 'experiment_id': { - 'ask_question': True, - 'default_value': f'swell-{self.suite_to_run}', - 'prompt': 'Enter the experiment ID', - 'type': 'string', - }, - 'experiment_root': { - 'ask_question': True, - 'default_value': 'defer_to_platform', - 'prompt': 'Enter the path where experiment 
will be staged', - 'type': 'string', - }, - 'platform': { - 'ask_question': False, - 'default_value': self.platform, - 'prompt': 'Enter the platform on which experiment will run', - 'type': 'string' - }, - 'suite_to_run': { - 'ask_question': False, - 'default_value': self.suite_to_run, - 'prompt': 'Enter the suite you wish to run', - 'type': 'string' - } - } - - # Look in suite_to_run dir and check for suite yaml file, if not, grep the flow.cylc file - - # Check which suite to run, check for suite_questions yaml, and grep flow file - self.directory = os.path.join(self.directory, self.suite_to_run) - suite_questions_path = os.path.join(self.directory, 'suite_questions.yaml') - - self.base_questions_dictionary = {} - self.base_questions_dictionary.update(exp_questions) - - if os.path.exists(suite_questions_path): - with open(suite_questions_path, 'r') as suite_dict_file: - suite_questions_dict = yaml.safe_load(suite_dict_file.read()) - - # Need to create a distint model suite dictionary to prepend later - # onto model questions from task_questions.yaml file - self.model_suite_questions = {} - - for k, v in suite_questions_dict.items(): - if 'models' in v.keys(): - self.model_flag = True - self.model_suite_questions[k] = v - else: - self.base_questions_dictionary[k] = v - - # Get the tasks asked prior to model selection and model-based tasks - base_tasks, model_tasks = self.open_flow() - - return base_tasks, model_tasks - - # ---------------------------------------------------------------------------------------------- - - def open_flow(self): - # open text file in read mode - cylc_file = open(os.path.join(self.directory, 'flow.cylc'), "r") - - # read whole file to a string - data = cylc_file.read() - - # close file - cylc_file.close() - - # Find the double bracket items in the Tasks section of the flow file to ID required tasks - task_s = data[(data.find('Tasks')):] - task_s_lines = task_s.split('\n') - - base_task_list = [] - model_task_list = [] - for line in 
task_s_lines: - if 'script = "swell task' in line: - task_name = line.split('"swell task')[1].split(' ')[1] - if '-m' in line: - if task_name in model_task_list: - continue - model_task_list.append(task_name) - else: - if task_name in base_task_list: - continue - base_task_list.append(task_name) - - return base_task_list, model_task_list - - # ---------------------------------------------------------------------------------------------- - - def model_task_comber(self, model_tasks): - model_task_dict = {} - for m in self.selected_models: - self.model = m - model_task_dict[m] = self.task_dictionary_comber(model_tasks) - return model_task_dict - - # ---------------------------------------------------------------------------------------------- - - def task_dictionary_comber(self, task_list): - question_dict = {} - for t in task_list: - self.current_dictionary = {} - task_dict = {} - dependent_dict = {} - # Pass the task list to get the question dictionaries required for - # the task. Do not get the questions that depend on prior questions - task_dict = self.build_question_dictionary(t, get_dependents=False) - # Get the question dictionaries that depend on previous questions - dependent_dict = self.build_question_dictionary(t, get_dependents=True) - # Combine the two dictionaries above - task_dict.update(dependent_dict) - question_dict.update(task_dict) - del task_dict - del dependent_dict - return question_dict - - # ---------------------------------------------------------------------------------------------- - - def build_question_dictionary(self, task, get_dependents): - question_dictionary = {} - model_key_list = ['all'] + [self.model] - big_dictionary = copy.deepcopy(self.all_task_questions) - for k, v in big_dictionary.items(): - if self.model is not None: - if 'models' in v.keys() and v['models'][0] in model_key_list: - pass - else: - continue - if task in v['tasks']: - if get_dependents: - if 'depends' in v.keys(): - question_dictionary[k] = v - else: - if 
'depends' not in v.keys(): - question_dictionary[k] = v - del big_dictionary - return question_dictionary - - # ---------------------------------------------------------------------------------------------- - - def override_models(self): - if 'model_components' in self.test_dictionary.keys(): - self.default_models = self.test_dictionary['model_components'] - if self.override is not None: - if 'model_components' in self.override_dictionary.keys(): - self.default_models = self.override_dictionary['model_components'] - - # ---------------------------------------------------------------------------------------------- - - def get_model_defaults(self): - self.model_defaults_dict = {} - for m in self.selected_models: - self.model_defaults_dict[m] = {} - for s in ['suite', 'task']: - model_dict_path = os.path.join(self.model_path, m, f'{s}_questions.yaml') - with open(model_dict_path, 'r') as model_dict_file: - model_comp_dict = yaml.safe_load(model_dict_file.read()) - self.model_defaults_dict[m].update(model_comp_dict) - - # ---------------------------------------------------------------------------------------------- - - def show_deference(self, key, el_dict): - - if 'defer_to_' in str(el_dict['default_value']): - pass - else: - return el_dict - - if 'defer_to_platform' == el_dict['default_value']: - el_dict['default_value'] = self.platform_dictionary[key]['default_value'] - elif 'defer_to_model' == el_dict['default_value']: - el_dict['default_value'] = self.model_defaults_dict[self.model][key]['default_value'] - if 'options' in self.model_defaults_dict[self.model][key].keys()\ - and 'defer_to_model' == el_dict['options']: - el_dict['options'] = self.model_defaults_dict[self.model][key]['options'] - - return el_dict - - # ---------------------------------------------------------------------------------------------- - - def override_defaults(self, key, el_dict): - - override_dict = None - - if self.model is None: - if key in self.test_dictionary.keys(): - override_dict = 
self.test_dictionary - else: - pass - if self.override is not None: - if key in self.override_dictionary.keys(): - override_dict = self.override_dictionary - else: - pass - if override_dict is None: - return el_dict - else: - override_default = override_dict[key] - if 'options' in el_dict.keys(): - if isinstance(override_default, list): - override_options = override_default - else: - override_options = [override_default] - else: - override_options = None - else: - if 'models' in self.test_dictionary.keys(): - if self.model in self.test_dictionary['models'].keys(): - if key in self.test_dictionary['models'][self.model].keys(): - override_dict = self.test_dictionary - else: - pass - if self.override is not None: - if 'models' in self.override_dictionary.keys(): - if self.model in self.override_dictionary['models'].keys(): - if key in self.override_dictionary['models'][self.model].keys(): - override_dict = self.override_dictionary - else: - pass - - if override_dict is None: - return el_dict - else: - override_default = override_dict['models'][self.model][key] - if 'options' in el_dict.keys(): - if isinstance(override_default, list): - override_options = override_default - else: - override_options = [override_default] - else: - override_options = None - - el_dict['default_value'] = override_default - if 'options' in el_dict.keys(): - el_dict['options'] = override_options - - return el_dict - - # ---------------------------------------------------------------------------------------------- - - def key_passer(self, key, el_dict): - - # Validate the element dictionary - self.validate_dictionary(el_dict) - - # Check that the key does not have a dependency - depends_flag = True - if 'depends' in el_dict.keys(): - dep_key = el_dict['depends']['key'] - dep_val = el_dict['depends']['value'] - if self.model is None: - if self.experiment_dict[dep_key] != dep_val: - depends_flag = False - else: - if self.experiment_dict['models'][self.model][dep_key] != dep_val: - depends_flag = 
False - - # In this case the key is not expected to refer to a sub dictionary but have - # everything needed in the elements dictionary - if depends_flag: - - el_dict = self.show_deference(key, el_dict) - - el_dict = self.override_defaults(key, el_dict) - - if self.show_advanced: - el_dict['ask_question'] = True - - if el_dict['ask_question']: - el_dict['default_value'] = self.get_answer(key, el_dict) - else: - pass - self.add_to_experiment_dictionary(key, el_dict) - - return - - # ---------------------------------------------------------------------------------------------- - - def prepend_model_dict(self): - complete_model_dict = {} - for m in self.selected_models: - model_key_list = ['all'] + [m] - complete_model_dict[m] = {} - for k, v in self.model_suite_questions.items(): - if v['models'][0] in model_key_list: - complete_model_dict[m][k] = v - complete_model_dict[m].update(self.model_questions_dictionary[m]) - self.model_questions_dictionary = complete_model_dict - - # ---------------------------------------------------------------------------------------------- - - def validate_dictionary(self, dictionary): - - # Check for required key - required_keys = ['default_value', 'prompt', 'type'] - for required_key in required_keys: - if required_key not in dictionary.keys(): - self.logger.abort(f'Each section of the suites config files must contain the key ' + - f'\'{required_key}\'. Offending dictionary: \n {dictionary}') - - # Check that type is supported - type = dictionary['type'] - if type not in self.valid_types: - self.logger.abort(f'Dictionary has type \'{type}\' that is not one of the supported ' + - f'types: {self.valid_types}. 
Offending dictionary: \n {dictionary}') - - # ---------------------------------------------------------------------------------------------- - - def add_to_experiment_dictionary(self, key, element_dict): - - # Add elements to the current dictionary - # -------------------------------------- - self.current_dictionary[key] = element_dict - - # Add executed key to exec_keys list - # ---------------------------------- - self.exec_keys.append(key) - - # Set the element - # --------------- - element = element_dict['default_value'] - prompt = element_dict['prompt'] - - # Validate the element - # -------------------- - - # Ensure always a list to make following logic not need to check if list or not - if not isinstance(element, list): - element_items = [element] - else: - element_items = element - - # Check for disallowed element types - for element_item in element_items: - for dis_elem_type in self.dis_elem_types: - if isinstance(element_item, dis_elem_type): - self.logger.abort(f'Element \'{element}\' has a type that is not permitted. ' + - f'Type is \'{dis_elem_type}\'. Try replacing with a string ' + - f'in the configuration file.') - - # Validate the key - # ---------------- - - # Ensure there are no spaces in the key - if ' ' in key: - self.logger.abort(f'Key \'{key}\' contains a space. 
For consistency across the ' + - f'configurations please avoid spaces and instead use _ if needed.') - - # Check that dictionary does not already contain the key - if key in self.experiment_dict.keys(): - - self.logger.abort(f'Key \'{key}\' is already in the experiment dictionary.') - - # Check if models key is present in experiment dictionary - if self.model is not None: - if 'models' not in self.experiment_dict.keys(): - self.experiment_dict['models'] = {} - - # If specific model dictionary not added to the list of model then add it - if self.model not in self.experiment_dict['models'].keys(): - self.experiment_dict['models'][self.model] = {} - - # Make sure the element was not already added - # ------------------------------------------- - if self.model is None: - if key in self.experiment_dict.keys(): - self.logger.abort(f'Key \'{key}\' is already in the experiment dictionary.') - else: - if key in self.experiment_dict['models'][self.model].keys(): - self.logger.abort(f'Key \'{key}\' is already in the experiment dictionary.') - - # Add element - # ----------- - if self.model is None: - self.experiment_dict[key] = element - else: - self.experiment_dict['models'][self.model][key] = element - - # Add option - # ---------- - if self.model is None: - option_key = key - else: - if 'models' not in self.comment_dict.keys(): - self.comment_dict['models'] = 'Options for individual model components' - if 'models.' + self.model not in self.comment_dict.keys(): - self.comment_dict['models.' + self.model] = f'Options for the {self.model} ' + \ - f'model component' - option_key = 'models.' + self.model + '.' 
+ key - self.comment_dict[option_key] = prompt - - # ---------------------------------------------------------------------------------------------- - - def update_experiment_dictionary(self, key, new_value): - if self.model is not None: - self.experiment_dict['models'][self.model][key] = new_value - else: - self.experiment_dict[key] = new_value - - # ---------------------------------------------------------------------------------------------- - - @abstractmethod - def get_answer(self, dictionary): - pass - # The subclass has to implement an execute method since this is how it is called into - # action. - -# -------------------------------------------------------------------------------------------------- - - -def camel_to_snake(s): - new_string = re.sub(r'(? None: + + # Store local copy of the inputs + self.logger = logger + self.suite = suite + self.platform = platform + self.override = override + + # Assign the client that will take care of providing responses + if config_client.lower() == 'cli': + self.config_client = GetAnswerCli() + elif config_client.lower() == 'defaults': + self.config_client = GetAnswerDefaults() + + # Big dictionary that contains all user responses as well a dictionary containing the + # questions that were asked + self.experiment_dict = {} + self.questions_dict = {} + + # Get list of all possible models + self.possible_model_components = os.listdir(os.path.join(get_swell_path(), 'configuration', + 'jedi', 'interfaces')) + + # Read suite file into a string + suite_file = os.path.join(get_swell_path(), 'suites', self.suite, 'flow.cylc') + with open(suite_file, 'r') as suite_file_open: + self.suite_str = suite_file_open.read() + + # Perform the assembly of the dictionaries that contain all the questions that can possibly + # be asked. 
This + self.prepare_question_dictionaries() + self.override_with_defaults() + self.override_with_external() + + # ---------------------------------------------------------------------------------------------- + + def prepare_question_dictionaries(self) -> None: + + """ + Read the suite and task question YAML files and perform various steps: + + 1. Read suite and task dictionaries into a single dictionary + 2. Discard questions not associated with this suite + 3. Split the dictionary into model independent and model dependent dictionaries + 4. Create a dictionary for each possible model component + + At the end there will be two dictionaries that look like this (in YAML format): + + self.question_dictionary_model_ind: + question1: + ask_question: True + default_value: 'defer_to_' + prompt: ... + + self.question_dictionary_model_dep: + model1: + question1: + ask_question: True + default_value: 'defer_to_' + prompt: ... + model2: + question1: + ask_question: True + default_value: 'defer_to_' + prompt: ... + """ + + # Read suite questions into a dictionary + suite_questions_file = os.path.join(get_swell_path(), 'suites', 'suite_questions.yaml') + with open(suite_questions_file, 'r') as ymlfile: + question_dictionary = yaml.safe_load(ymlfile) + + # Read task questions into a dictionary + task_questions_file = os.path.join(get_swell_path(), 'tasks', 'task_questions.yaml') + with open(task_questions_file, 'r') as ymlfile: + question_dictionary_tasks = yaml.safe_load(ymlfile) + + # Loop through question_dictionary_tasks. If the key does not already exist add to the + # question_dictionary. If the key does exist then only add the tasks key to the existing + # question_dictionary + for key, val in question_dictionary_tasks.items(): + if key not in question_dictionary.keys(): + question_dictionary[key] = val + else: + # In this case the question is both a suite question and a task question. 
+ # To avoid any confusion, only the tasks key is taken from the task dictionary + question_dictionary[key]['tasks'] = val['tasks'] + + # Iterate over the question_dictionary dictionary and remove keys not associated with this + # suite. Note that there might be questions that are not needed by the suite but could still + # be needed by the tasks in the suite. These are not removed but the suite key is removed. + # Note also that at this point we do not know which tasks will actually be needed so we + # can only remove questions that are known not to be needed by the suite. + keys_to_remove = [] + for key, val in question_dictionary.items(): + if 'suites' in val: + # If this suite question needed then skip to the next question + if val['suites'] == ['all'] or self.suite in val['suites']: + continue + else: + if 'tasks' not in val: + # Question not needed by suite and not a task question: remove + keys_to_remove.append(key) + else: + # Question not needed by suite but might be needed by tasks. + # Reduce to a task only question. + val.pop('suites') + for key in keys_to_remove: + del question_dictionary[key] + + # At this point we can check to see if this is a suite that requires model components + self.suite_needs_model_components = True + if 'model_components' not in question_dictionary.keys(): + self.suite_needs_model_components = False + + # Create copy of the question_dictionary for model independent questions + question_dictionary_model_ind = copy.deepcopy(question_dictionary) + + # Iterate through the model_ind dictionary and remove questions associated with models + # and questions not required by the suite + keys_to_remove = [] + for key, val in question_dictionary_model_ind.items(): + if 'models' in val.keys(): + keys_to_remove.append(key) + + # Cycle times can be a special case that is needed even when models are not. Though if they + # are then the cycle times are needed for each model component. 
So we need to check if the + # suite needs cycle_times + + # If there are no models and the cycle_times is in the keys to remove then remove it + if not self.suite_needs_model_components and 'cycle_times' in keys_to_remove: + keys_to_remove.remove('cycle_times') + + # Now remove the keys + for key in keys_to_remove: + del question_dictionary_model_ind[key] + self.question_dictionary_model_ind = copy.deepcopy(question_dictionary_model_ind) + + # If there are no models and the cycle_times is in the keys then remove the models key from + # the cycle_times question dictionary + if 'cycle_times' in self.question_dictionary_model_ind.keys(): + if not self.suite_needs_model_components: + self.question_dictionary_model_ind['cycle_times'].pop('models') + self.question_dictionary_model_ind['cycle_times']['default_value'] = 'T00' + + # At this point we can return if there are no model components + if not self.suite_needs_model_components: + return + + # Create copy of the question_dictionary for model dependent questions + question_dictionary_model_dep = copy.deepcopy(question_dictionary) + + # Iterate through the model_dep dictionary and remove questions not associated with models + # and questions not required by the suite + keys_to_remove = [] + for key, val in question_dictionary_model_dep.items(): + if 'models' not in val.keys(): + keys_to_remove.append(key) + for key in keys_to_remove: + del question_dictionary_model_dep[key] + + # Create new questions dictionary for each model component + self.question_dictionary_model_dep = {} + for model in self.possible_model_components: + self.question_dictionary_model_dep[model] = copy.deepcopy(question_dictionary_model_dep) + + # Remove any questions that are not associated with the model component + for model in self.possible_model_components: + keys_to_remove = [] + for key, val in self.question_dictionary_model_dep[model].items(): + if val['models'] != ['all'] and model not in val['models']: + keys_to_remove.append(key) # 
Remove if not needed by this model + + for key in keys_to_remove: + del self.question_dictionary_model_dep[model][key] + + # ---------------------------------------------------------------------------------------------- + + def override_with_defaults(self) -> None: + + # Perform a platform override on the model_ind dictionary + # ------------------------------------------------------- + platform_defaults = {} + for suite_task in ['suite', 'task']: + platform_dict_file = os.path.join(get_swell_path(), 'deployment', 'platforms', + self.platform, f'{suite_task}_questions.yaml') + with open(platform_dict_file, 'r') as ymlfile: + platform_defaults.update(yaml.safe_load(ymlfile)) + + # Loop over the keys in self.question_dictionary_model_ind and update with platform_defaults + # if that dictionary shares the key + for key, val in self.question_dictionary_model_ind.items(): + if key in platform_defaults.keys(): + self.question_dictionary_model_ind[key].update(platform_defaults[key]) + + # Perform a model override on the model_dep dictionary + # ---------------------------------------------------- + if self.suite_needs_model_components: + for model, model_dict in self.question_dictionary_model_dep.items(): + + # Open the suite and task default dictionaries + model_defaults = {} + for suite_task in ['suite', 'task']: + model_dict_file = os.path.join(get_swell_path(), 'configuration', 'jedi', + 'interfaces', model, + f'{suite_task}_questions.yaml') + with open(model_dict_file, 'r') as ymlfile: + model_defaults.update(yaml.safe_load(ymlfile)) + + # Loop over the keys in self.question_dictionary_model_ind and update with + # model_defaults or platform_defaults if that dictionary shares the key + for key, val in model_dict.items(): + if key in model_defaults.keys(): + model_dict[key].update(model_defaults[key]) + + if key in platform_defaults.keys(): + model_dict[key].update(platform_defaults[key]) + + # Look for defer_to_code in the model_ind dictionary + # 
-------------------------------------------------- + for key, val in self.question_dictionary_model_ind.items(): + if val['default_value'] == 'defer_to_code': + + if key == 'experiment_id': + val['default_value'] = f'swell-{self.suite}' + + if key == 'model_components': + val['default_value'] = self.possible_model_components + val['options'] = self.possible_model_components + + # ---------------------------------------------------------------------------------------------- + + def override_with_external(self) -> None: + + # Create an override dictionary + override_dict = {} + + # Always start the override with a suite test file + test_file = os.path.join(get_swell_path(), 'test', 'suite_tests', + self.suite + '-tier1.yaml') + if os.path.exists(test_file): + with open(test_file, 'r') as ymlfile: + override_dict = yaml.safe_load(ymlfile) + + # Now append with any user provided override + if self.override is not None: + + if isinstance(self.override, dict): + override_dict.update(self.override) + elif isinstance(self.override, str): + with open(self.override, 'r') as ymlfile: + override_dict.update(yaml.safe_load(ymlfile)) + else: + self.logger.abort(f'Override must be a dictionary or a path to a yaml file.') + + # In this case the user is sending in a dictionary that looks like the experiment dictionary + # that they will ultimately be looking at. This means the dictionary does not contain + # default_value or options and the override cannot be performed. 
+ + # Iterate over the model_ind dictionary and override + # -------------------------------------------------- + for key, val in self.question_dictionary_model_ind.items(): + if key in override_dict: + val['default_value'] = override_dict[key] + + # Iterate over the model_dep dictionary and override + # -------------------------------------------------- + if self.suite_needs_model_components: + for model, model_dict in self.question_dictionary_model_dep.items(): + for key, val in model_dict.items(): + if model in override_dict['models']: + if key in override_dict['models'][model]: + val['default_value'] = override_dict['models'][model][key] + + # ---------------------------------------------------------------------------------------------- + + def ask_questions_and_configure_suite(self) -> Tuple[dict, dict]: + + """ + This is where we ask all the questions and as we go configure the suite file. The process + is rather complex and proceeds as described below. The order is determined by what makes + sense to a user that is going through answering questions. For example we want them to be + able to answer all the questions associated with a certain model together. While there is + work going on behind the scenes to configure the suite file the user should not see a break + in the questioning or a back and forth that causes confusion. + + 1. Ask the model independent suite questions. + + 2. Perform a non-exhaustive resolving of suite file templates. Non-exhaustive because at + this point we have not asked the model dependent suite questions so there may be more + templates to resolve. + + 3. Get a list of tasks that do not depend on the model component. + + 4. Ask the model independent task questions. + + 5. Check that the suite in question has model_components + + 6. Ask the model dependent suite questions. + + 7. Perform an exhaustive resolving of suite file templates. 
Now it is exhaustive because at + this point we should have all the required information to resolve all the templates. + + 8. Build a list of tasks for each model component. + + 9. Ask the model dependent task questions. + """ + + # If the client is CLI put out some information about what is due to happen next + if self.config_client.__class__.__name__ == 'GetAnswerCli': + self.logger.info("Please answer the following questions to configure your experiment ") + + # 1. Iterate over the model_ind dictionary and ask questions + # ---------------------------------------------------------- + for question_key in self.question_dictionary_model_ind: + + # Ask only the suite questions first + # ---------------------------------- + if 'suites' in self.question_dictionary_model_ind[question_key]: + + # Ask the question + self.ask_a_question(self.question_dictionary_model_ind, question_key) + + # 2. Perform a non-exhaustive resolving of suite file templates + # ------------------------------------------------------------- + suite_str = template_string_jinja2(self.logger, self.suite_str, self.experiment_dict, True) + + # 3. Get a list of tasks that do not depend on the model component + # ---------------------------------------------------------------- + model_ind_tasks = self.get_suite_task_list_model_ind(suite_str) + + # 4.1 Iterate over the model_ind dictionary and ask task questions + # ---------------------------------------------------------------- + for question_key in self.question_dictionary_model_ind: + + # Ask the task questions + # ---------------------- + if 'suites' not in self.question_dictionary_model_ind[question_key]: + + # Get list of tasks for the question + question_tasks = self.question_dictionary_model_ind[question_key]['tasks'] + + # Check whether any of model_ind_tasks are in question_tasks + if any(elem in question_tasks for elem in model_ind_tasks): + + # Ask the question + self.ask_a_question(self.question_dictionary_model_ind, question_key) + + # 5. 
Check that the suite in question has model_components + # -------------------------------------------------------- + if not self.suite_needs_model_components: + return self.experiment_dict, self.questions_dict + + # 6. Iterate over the model_dep dictionary and ask suite questions + # ---------------------------------------------------------------- + + # At this point the user should have provided the model components answer. Check that it is + # in the experiment dictionary and retrieve the response + if 'model_components' not in self.experiment_dict: + self.logger.abort('The model components question has not been answered.') + + for model in self.experiment_dict['model_components']: + + model_dict = self.question_dictionary_model_dep[model] + + # Loop over keys of each model + for question_key in model_dict: + + # Ask only the suite questions first + if 'suites' in model_dict[question_key]: + + # Ask the question + self.ask_a_question(model_dict, question_key, model) + + # 7. Perform a more exhaustive resolving of suite file templates + # -------------------------------------------------------------- + # Note that we reset the suite file to avoid templates having been left unresolved + # (removed) from the previous attempt. We still do not ask for an exhaustive resolving + # of templates because there are things related to scheduling that are not yet able to be + # resolved. In the future it might be good to bring some of that information into the + # sphere of suite questions but that requires some careful thought so as not to overload + # the user with questions. + suite_str = template_string_jinja2(self.logger, self.suite_str, self.experiment_dict, + True) + + # 8. 
Build a list of tasks for each model component + # ------------------------------------------------- + model_dep_tasks, all_tasks = self.get_suite_task_list_model_dep(suite_str) + + # 9.1 Ask the new task questions that do not actually depend on the model + # ----------------------------------------------------------------------- + for question_key in self.question_dictionary_model_ind: + + if 'tasks' in self.question_dictionary_model_ind[question_key]: + + # Get list of tasks for the question + question_tasks = self.question_dictionary_model_ind[question_key]['tasks'] + + # Check whether any of model_dep_tasks are in question_tasks + if any(elem in question_tasks for elem in all_tasks): + + # Ask the question + self.ask_a_question(self.question_dictionary_model_ind, question_key) + + # 9.2 Iterate over the model_dep dictionary and ask task questions + # ---------------------------------------------------------------- + for model in self.experiment_dict['model_components']: + + # Iterate over the model_dep dictionary and ask questions + # ------------------------------------------------------- + for question_key in self.question_dictionary_model_dep[model]: + + # Ask only the task questions first + # ---------------------------------- + if 'suites' not in self.question_dictionary_model_dep[model][question_key]: + + # Get list of tasks for the question + question_tasks = \ + self.question_dictionary_model_dep[model][question_key]['tasks'] + + # Check whether any of model_dep_tasks are in question_tasks + if any(elem in question_tasks for elem in model_dep_tasks[model]): + + # Ask the question + self.ask_a_question(self.question_dictionary_model_dep[model], question_key, + model) + + # Return the main experiment dictionary + return self.experiment_dict, self.questions_dict + + # ---------------------------------------------------------------------------------------------- + def ask_a_question( + self, + full_question_dictionary: dict, + question_key: str, + model: 
Optional[str] = None + ) -> None: + + # Set flag for whether the question should be asked + ask_question = True + + # Has the question already been asked? + if question_key in self.experiment_dict: + ask_question = False + + # Dictionary for this question + qd = full_question_dictionary[question_key] + + # If model is not none then ensure the experiment dictionary has a dictionary for the model + if model is not None: + if 'models' not in self.experiment_dict: + self.experiment_dict['models'] = {} + self.questions_dict['models'] = f"Configurations for the model components." + if model not in self.experiment_dict['models']: + self.experiment_dict['models'][model] = {} + self.questions_dict[f'models.{model}'] = \ + f"Configuration for the {model} model component." + + # Check the dependency chain for the question + if 'depends' in qd: + + # Check is dependency has been asked + if qd['depends']['key'] not in self.experiment_dict: + + # Iteratively ask the dependent question + self.ask_a_question(full_question_dictionary, qd['depends']['key'], model) + + # Check that answer for dependency matches the required value + if model is None: + if self.experiment_dict[qd['depends']['key']] != qd['depends']['value']: + ask_question = False + else: + prev = self.experiment_dict['models'][model][qd['depends']['key']] + if prev != qd['depends']['value']: + ask_question = False + + # Ask the question using the selected client + if ask_question: + if model is None: + self.experiment_dict[question_key] = self.config_client.get_answer(question_key, qd) + self.questions_dict[question_key] = qd['prompt'] + else: + self.experiment_dict['models'][model][question_key] = \ + self.config_client.get_answer(question_key, qd) + self.questions_dict[f'models.{model}.{question_key}'] = qd['prompt'] + + # ---------------------------------------------------------------------------------------------- + + def get_suite_task_list_model_ind(self, suite_str: str) -> list: + + # Search the suite string 
for lines containing 'swell task' and not '-m' + swell_task_lines = [line for line in suite_str.split('\n') if 'swell task' in line and + '-m' not in line] + + # Now get the task part + tasks = [] + for line in swell_task_lines: + # Split by 'swell task' + # Remove any leading spaces + # Split by space + tasks.append(line.split('swell task')[1].strip().split(' ')[0]) + + # Ensure there are no duplicate tasks + tasks = list(set(tasks)) + + # Return tasks + return tasks + + # ---------------------------------------------------------------------------------------------- + + def get_suite_task_list_model_dep(self, suite_str: str) -> Tuple[dict, list]: + + # Search the suite string for lines containing 'swell task' and '-m' + swell_task_lines = [line for line in suite_str.split('\n') if 'swell task' in line and + '-m' in line] + + # Strip " and spaces from all lines + swell_task_lines = [line.replace('"', '') for line in swell_task_lines] + swell_task_lines = [line.strip() for line in swell_task_lines] + + # Now get the model part + models = [] + for line in swell_task_lines: + models.append(line.split('-m')[1].split('0')[0].strip()) + + # Unique models + models = list(set(models)) + + # All tasks + all_tasks = [] + + # Assemble dictionary where key is model and val is the tasks that model is associated with + model_tasks = {} + for model in models: + + # Get all elements of swell_task_lines that contains "-m {model}" + model_tasks_this_model = [line for line in swell_task_lines if f'-m {model}' in line] + + # Get task name + tasks = [] + for line in model_tasks_this_model: + tasks.append(line.split('swell task ')[1].split(' ')[0]) + + # Unique model tasks + model_tasks[model] = list(set(tasks)) + + # Also append all tasks + all_tasks += tasks + + # Ensure all_tasks are unique + all_tasks = list(set(all_tasks)) + + # Return the dictionary + return model_tasks, all_tasks + +# 
-------------------------------------------------------------------------------------------------- diff --git a/src/swell/deployment/prep_config_cli.py b/src/swell/deployment/prepare_config_and_suite/question_and_answer_cli.py similarity index 73% rename from src/swell/deployment/prep_config_cli.py rename to src/swell/deployment/prepare_config_and_suite/question_and_answer_cli.py index 5880ec24..dd11df1d 100644 --- a/src/swell/deployment/prep_config_cli.py +++ b/src/swell/deployment/prepare_config_and_suite/question_and_answer_cli.py @@ -8,20 +8,17 @@ # -------------------------------------------------------------------------------------------------- -import glob -import os import re import sys import questionary from questionary import Choice -from swell.deployment.prep_config_base import PrepConfigBase # -------------------------------------------------------------------------------------------------- -class PrepConfigCli(PrepConfigBase): +class GetAnswerCli: def get_answer(self, key, val): # Set questionary variable @@ -63,33 +60,6 @@ def get_answer(self, key, val): # ---------------------------------------------------------------------------------------------- - def get_models(self): - - model_options = glob.glob(os.path.join(self.model_path, '*')) - model_options.sort() - - model_options = [os.path.basename(x) for x in model_options] - - choices = [] - - for mod in model_options: - if mod in self.default_models: - choices.append(Choice(mod, checked=True)) - else: - choices.append(Choice(mod, checked=False)) - - selected_models = self.make_check_widget('Which model components are required?', - choices, - default=None, - prompt=questionary.checkbox) - - if 'None' in selected_models: - selected_models = [] - - return selected_models - - # ---------------------------------------------------------------------------------------------- - def make_string_widget(self, quest, default, prompt): answer = prompt(f"{quest} [{default}]", default=default).ask() @@ -158,12 
+128,12 @@ def make_duration(self, quest, default, prompt): class durValidator(questionary.Validator): def validate(self, document): - r = re.compile('PT\d{1,2}H') # noqa + r = re.compile('[-]?P(T\d{1,2}H|\d{1,2}D)') # noqa if r.match(document.text) is None and document.text != 'EXIT': raise questionary.ValidationError( message="Please enter a duration with the following format: PThhH", cursor_position=len(document.text), - ) + ) # Need to add validation to allow negative sign in the front. if isinstance(default, list): answer_list = [] @@ -180,7 +150,7 @@ def validate(self, document): else: answer_list.append(answer) elif isinstance(default, str): - answer = prompt(f"{quest}\n[format PThhH e.g. {default}]", + answer = prompt(f"{quest}\n[format PThhH or -PThhH e.g. {default}]", validate=durValidator, default=default).ask() return answer @@ -203,33 +173,5 @@ def make_check_widget(self, quest, options, default, prompt): else 'Please select one option').ask() return answer - # ---------------------------------------------------------------------------------------------- - - def before_next(self): - changer = self.make_boolean('Do you wish to change any of your entries?', - False, - questionary.confirm) - if changer: - keys = self.exec_keys - for k in keys: - if k not in list(self.current_dictionary.keys()): - non_exec_idx = keys.index(k) - keys.pop(non_exec_idx) - # Show user key change options and retrieve new values - change_keys = self.make_check_widget('Which elements would you like to change?', - keys, - None, - questionary.checkbox) - - for k in change_keys: - changed_dict = self.current_dictionary[k] - new_default_value = self.get_answer(k, changed_dict) - if k == keys[-1]: - changed_dict['default_value'] = new_default_value - return changed_dict - else: - self.update_experiment_dictionary(k, new_default_value) - self.exec_keys = [] - return None # -------------------------------------------------------------------------------------------------- diff --git 
a/src/swell/deployment/prep_config_defaults.py b/src/swell/deployment/prepare_config_and_suite/question_and_answer_defaults.py similarity index 53% rename from src/swell/deployment/prep_config_defaults.py rename to src/swell/deployment/prepare_config_and_suite/question_and_answer_defaults.py index 0c5fc8f0..fb3e2082 100644 --- a/src/swell/deployment/prep_config_defaults.py +++ b/src/swell/deployment/prepare_config_and_suite/question_and_answer_defaults.py @@ -8,25 +8,9 @@ # -------------------------------------------------------------------------------------------------- -from swell.deployment.prep_config_base import PrepConfigBase - - -# -------------------------------------------------------------------------------------------------- - - -class PrepConfigDefaults(PrepConfigBase): +class GetAnswerDefaults: def get_answer(self, key, val): return val['default_value'] - # ---------------------------------------------------------------------------------------------- - - def get_models(self): - return self.default_models - - # ---------------------------------------------------------------------------------------------- - - def before_next(self): - return None - # -------------------------------------------------------------------------------------------------- diff --git a/src/swell/suites/3dfgat_atmos/flow.cylc b/src/swell/suites/3dfgat_atmos/flow.cylc index 24747468..2541ef77 100644 --- a/src/swell/suites/3dfgat_atmos/flow.cylc +++ b/src/swell/suites/3dfgat_atmos/flow.cylc @@ -118,9 +118,9 @@ platform = {{platform}} execution time limit = {{scheduling["BuildJedi"]["execution_time_limit"]}} [[[directives]]] - {% for key, value in scheduling["BuildJedi"]["directives"]["all"].items() %} + {%- for key, value in scheduling["BuildJedi"]["directives"]["all"].items() %} --{{key}} = {{value}} - {% endfor %} + {%- endfor %} {% for model_component in model_components %} @@ -150,9 +150,9 @@ platform = {{platform}} execution time limit = 
{{scheduling["RunJediVariationalExecutable"]["execution_time_limit"]}} [[[directives]]] - {% for key, value in scheduling["RunJediVariationalExecutable"]["directives"]["all"].items() %} + {%- for key, value in scheduling["RunJediVariationalExecutable"]["directives"]["all"].items() %} --{{key}} = {{value}} - {% endfor %} + {%- endfor %} [[EvaJediLog-{{model_component}}]] script = "swell task EvaJediLog $config -d $datetime -m {{model_component}}" @@ -165,9 +165,9 @@ platform = {{platform}} execution time limit = {{scheduling["EvaObservations"]["execution_time_limit"]}} [[[directives]]] - {% for key, value in scheduling["EvaObservations"]["directives"][model_component].items() %} + {%- for key, value in scheduling["EvaObservations"]["directives"][model_component].items() %} --{{key}} = {{value}} - {% endfor %} + {%- endfor %} [[SaveObsDiags-{{model_component}}]] script = "swell task SaveObsDiags $config -d $datetime -m {{model_component}}" diff --git a/src/swell/suites/3dfgat_atmos/suite_questions.yaml b/src/swell/suites/3dfgat_atmos/suite_questions.yaml deleted file mode 100644 index b7032d19..00000000 --- a/src/swell/suites/3dfgat_atmos/suite_questions.yaml +++ /dev/null @@ -1,32 +0,0 @@ -start_cycle_point: - ask_question: True - default_value: '2021-12-12T00:00:00Z' - prompt: What is the time of the first cycle (middle of the window)? - type: iso-datetime - -final_cycle_point: - ask_question: True - default_value: '2021-12-12T06:00:00Z' - prompt: What is the time of the final cycle (middle of the window)? - type: iso-datetime - -runahead_limit: - ask_question: True - default_value: 'P3' - prompt: Since this suite is non-cycling choose how many hours the workflow can run ahead? 
- type: string - -r2d2_local_path: - ask_question: False - default_value: defer_to_platform - prompt: Enter the path where R2D2 will store experiment output - type: string - -cycle_times: - ask_question: True - default_value: defer_to_model - options: defer_to_model - models: - - all - prompt: Enter the cycle times for this model. - type: string-check-list diff --git a/src/swell/suites/3dvar/eva/increment-geos_ocean.yaml b/src/swell/suites/3dvar/eva/increment-geos_ocean.yaml index b0a80f4b..eef84878 100644 --- a/src/swell/suites/3dvar/eva/increment-geos_ocean.yaml +++ b/src/swell/suites/3dvar/eva/increment-geos_ocean.yaml @@ -101,4 +101,3 @@ graphics: cmap: 'bwr' vmin: -1 vmax: 1 - diff --git a/src/swell/suites/3dvar/flow.cylc b/src/swell/suites/3dvar/flow.cylc index f2229847..3e598bfa 100644 --- a/src/swell/suites/3dvar/flow.cylc +++ b/src/swell/suites/3dvar/flow.cylc @@ -115,9 +115,9 @@ platform = {{platform}} execution time limit = {{scheduling["BuildJedi"]["execution_time_limit"]}} [[[directives]]] - {% for key, value in scheduling["BuildJedi"]["directives"]["all"].items() %} + {%- for key, value in scheduling["BuildJedi"]["directives"]["all"].items() %} --{{key}} = {{value}} - {% endfor %} + {%- endfor %} {% for model_component in model_components %} [[StageJedi-{{model_component}}]] @@ -140,18 +140,18 @@ platform = {{platform}} execution time limit = {{scheduling["GenerateBClimatology"]["execution_time_limit"]}} [[[directives]]] - {% for key, value in scheduling["GenerateBClimatology"]["directives"][model_component].items() %} + {%- for key, value in scheduling["GenerateBClimatology"]["directives"][model_component].items() %} --{{key}} = {{value}} - {% endfor %} + {%- endfor %} [[RunJediVariationalExecutable-{{model_component}}]] script = "swell task RunJediVariationalExecutable $config -d $datetime -m {{model_component}}" platform = {{platform}} execution time limit = {{scheduling["RunJediVariationalExecutable"]["execution_time_limit"]}} [[[directives]]] - 
{% for key, value in scheduling["RunJediVariationalExecutable"]["directives"][model_component].items() %} + {%- for key, value in scheduling["RunJediVariationalExecutable"]["directives"][model_component].items() %} --{{key}} = {{value}} - {% endfor %} + {%- endfor %} [[EvaJediLog-{{model_component}}]] script = "swell task EvaJediLog $config -d $datetime -m {{model_component}}" @@ -164,9 +164,9 @@ platform = {{platform}} execution time limit = {{scheduling["EvaObservations"]["execution_time_limit"]}} [[[directives]]] - {% for key, value in scheduling["EvaObservations"]["directives"][model_component].items() %} + {%- for key, value in scheduling["EvaObservations"]["directives"][model_component].items() %} --{{key}} = {{value}} - {% endfor %} + {%- endfor %} [[SaveObsDiags-{{model_component}}]] script = "swell task SaveObsDiags $config -d $datetime -m {{model_component}}" diff --git a/src/swell/suites/3dvar/suite_questions.yaml b/src/swell/suites/3dvar/suite_questions.yaml deleted file mode 100644 index 71c889f6..00000000 --- a/src/swell/suites/3dvar/suite_questions.yaml +++ /dev/null @@ -1,32 +0,0 @@ -start_cycle_point: - ask_question: True - default_value: '2021-12-12T00:00:00Z' - prompt: What is the time of the first cycle (middle of the window)? - type: iso-datetime - -final_cycle_point: - ask_question: True - default_value: '2021-12-12T06:00:00Z' - prompt: What is the time of the final cycle (middle of the window)? - type: iso-datetime - -runahead_limit: - ask_question: True - default_value: 'P4' - prompt: Since this suite is non-cycling choose how many hours the workflow can run ahead? - type: string - -r2d2_local_path: - ask_question: False - default_value: defer_to_platform - prompt: Enter the path where R2D2 will store experiment output - type: string - -cycle_times: - ask_question: True - default_value: defer_to_model - options: defer_to_model - models: - - all - prompt: Enter the cycle times for this model. 
- type: string-check-list diff --git a/src/swell/suites/3dvar_atmos/flow.cylc b/src/swell/suites/3dvar_atmos/flow.cylc index b2041421..adf30165 100644 --- a/src/swell/suites/3dvar_atmos/flow.cylc +++ b/src/swell/suites/3dvar_atmos/flow.cylc @@ -118,9 +118,9 @@ platform = {{platform}} execution time limit = {{scheduling["BuildJedi"]["execution_time_limit"]}} [[[directives]]] - {% for key, value in scheduling["BuildJedi"]["directives"]["all"].items() %} + {%- for key, value in scheduling["BuildJedi"]["directives"]["all"].items() %} --{{key}} = {{value}} - {% endfor %} + {%- endfor %} {% for model_component in model_components %} @@ -150,9 +150,9 @@ platform = {{platform}} execution time limit = {{scheduling["RunJediVariationalExecutable"]["execution_time_limit"]}} [[[directives]]] - {% for key, value in scheduling["RunJediVariationalExecutable"]["directives"][model_component].items() %} + {%- for key, value in scheduling["RunJediVariationalExecutable"]["directives"][model_component].items() %} --{{key}} = {{value}} - {% endfor %} + {%- endfor %} [[EvaJediLog-{{model_component}}]] script = "swell task EvaJediLog $config -d $datetime -m {{model_component}}" @@ -165,9 +165,9 @@ platform = {{platform}} execution time limit = {{scheduling["EvaObservations"]["execution_time_limit"]}} [[[directives]]] - {% for key, value in scheduling["EvaObservations"]["directives"][model_component].items() %} + {%- for key, value in scheduling["EvaObservations"]["directives"][model_component].items() %} --{{key}} = {{value}} - {% endfor %} + {%- endfor %} [[SaveObsDiags-{{model_component}}]] script = "swell task SaveObsDiags $config -d $datetime -m {{model_component}}" diff --git a/src/swell/suites/3dvar_atmos/suite_questions.yaml b/src/swell/suites/3dvar_atmos/suite_questions.yaml deleted file mode 100644 index b7032d19..00000000 --- a/src/swell/suites/3dvar_atmos/suite_questions.yaml +++ /dev/null @@ -1,32 +0,0 @@ -start_cycle_point: - ask_question: True - default_value: 
'2021-12-12T00:00:00Z' - prompt: What is the time of the first cycle (middle of the window)? - type: iso-datetime - -final_cycle_point: - ask_question: True - default_value: '2021-12-12T06:00:00Z' - prompt: What is the time of the final cycle (middle of the window)? - type: iso-datetime - -runahead_limit: - ask_question: True - default_value: 'P3' - prompt: Since this suite is non-cycling choose how many hours the workflow can run ahead? - type: string - -r2d2_local_path: - ask_question: False - default_value: defer_to_platform - prompt: Enter the path where R2D2 will store experiment output - type: string - -cycle_times: - ask_question: True - default_value: defer_to_model - options: defer_to_model - models: - - all - prompt: Enter the cycle times for this model. - type: string-check-list diff --git a/src/swell/suites/3dvar_cycle/flow.cylc b/src/swell/suites/3dvar_cycle/flow.cylc index 49cf832a..8eaae478 100644 --- a/src/swell/suites/3dvar_cycle/flow.cylc +++ b/src/swell/suites/3dvar_cycle/flow.cylc @@ -143,9 +143,9 @@ platform = {{platform}} execution time limit = {{scheduling["BuildGeos"]["execution_time_limit"]}} [[[directives]]] - {% for key, value in scheduling["BuildGeos"]["directives"]["all"].items() %} + {%- for key, value in scheduling["BuildGeos"]["directives"]["all"].items() %} --{{key}} = {{value}} - {% endfor %} + {%- endfor %} [[CloneJedi]] script = "swell task CloneJedi $config" @@ -158,18 +158,18 @@ platform = {{platform}} execution time limit = {{scheduling["BuildJedi"]["execution_time_limit"]}} [[[directives]]] - {% for key, value in scheduling["BuildJedi"]["directives"]["all"].items() %} + {%- for key, value in scheduling["BuildJedi"]["directives"]["all"].items() %} --{{key}} = {{value}} - {% endfor %} + {%- endfor %} [[RunGeosExecutable]] script = "swell task RunGeosExecutable $config -d $datetime" platform = {{platform}} execution time limit = {{scheduling["RunGeosExecutable"]["execution_time_limit"]}} [[[directives]]] - {% for key, value in 
scheduling["RunGeosExecutable"]["directives"]["all"].items() %} + {%- for key, value in scheduling["RunGeosExecutable"]["directives"]["all"].items() %} --{{key}} = {{value}} - {% endfor %} + {%- endfor %} [[PrepGeosRunDir]] script = "swell task PrepGeosRunDir $config -d $datetime" @@ -208,9 +208,9 @@ platform = {{platform}} execution time limit = {{scheduling["RunJediVariationalExecutable"]["execution_time_limit"]}} [[[directives]]] - {% for key, value in scheduling["RunJediVariationalExecutable"]["directives"][model_component].items() %} + {%- for key, value in scheduling["RunJediVariationalExecutable"]["directives"][model_component].items() %} --{{key}} = {{value}} - {% endfor %} + {%- endfor %} [[EvaJediLog-{{model_component}}]] script = "swell task EvaJediLog $config -d $datetime -m {{model_component}}" @@ -220,9 +220,9 @@ platform = {{platform}} execution time limit = {{scheduling["EvaObservations"]["execution_time_limit"]}} [[[directives]]] - {% for key, value in scheduling["EvaObservations"]["directives"][model_component].items() %} + {%- for key, value in scheduling["EvaObservations"]["directives"][model_component].items() %} --{{key}} = {{value}} - {% endfor %} + {%- endfor %} [[SaveObsDiags-{{model_component}}]] script = "swell task SaveObsDiags $config -d $datetime -m {{model_component}}" diff --git a/src/swell/suites/3dvar_cycle/suite_questions.yaml b/src/swell/suites/3dvar_cycle/suite_questions.yaml deleted file mode 100644 index b70c1c46..00000000 --- a/src/swell/suites/3dvar_cycle/suite_questions.yaml +++ /dev/null @@ -1,32 +0,0 @@ -start_cycle_point: - ask_question: True - default_value: '2021-12-12T00:00:00Z' - prompt: What is the time of the first cycle (middle of the window)? - type: iso-datetime - -final_cycle_point: - ask_question: True - default_value: '2021-12-12T06:00:00Z' - prompt: What is the time of the final cycle (middle of the window)? 
- type: iso-datetime - -r2d2_local_path: - ask_question: False - default_value: defer_to_platform - prompt: Enter the path where R2D2 will store experiment output - type: string - -runahead_limit: - ask_question: True - default_value: 'P2' - prompt: Since this suite is non-cycling choose how many hours the workflow can run ahead? - type: string - -cycle_times: - ask_question: True - default_value: defer_to_model - options: defer_to_model - models: - - all - prompt: Enter the cycle times for this model. - type: string-check-list diff --git a/src/swell/suites/build_geos/flow.cylc b/src/swell/suites/build_geos/flow.cylc index 46933a8b..dce2ce3d 100644 --- a/src/swell/suites/build_geos/flow.cylc +++ b/src/swell/suites/build_geos/flow.cylc @@ -49,8 +49,8 @@ platform = {{platform}} execution time limit = {{scheduling["BuildGeos"]["execution_time_limit"]}} [[[directives]]] - {% for key, value in scheduling["BuildGeos"]["directives"]["all"].items() %} + {%- for key, value in scheduling["BuildGeos"]["directives"]["all"].items() %} --{{key}} = {{value}} - {% endfor %} + {%- endfor %} # -------------------------------------------------------------------------------------------------- diff --git a/src/swell/suites/build_jedi/flow.cylc b/src/swell/suites/build_jedi/flow.cylc index d6ed6513..b7e347de 100644 --- a/src/swell/suites/build_jedi/flow.cylc +++ b/src/swell/suites/build_jedi/flow.cylc @@ -49,8 +49,8 @@ platform = {{platform}} execution time limit = {{scheduling["BuildJedi"]["execution_time_limit"]}} [[[directives]]] - {% for key, value in scheduling["BuildJedi"]["directives"]["all"].items() %} + {%- for key, value in scheduling["BuildJedi"]["directives"]["all"].items() %} --{{key}} = {{value}} - {% endfor %} + {%- endfor %} # -------------------------------------------------------------------------------------------------- diff --git a/src/swell/suites/convert_ncdiags/flow.cylc b/src/swell/suites/convert_ncdiags/flow.cylc index 1f7c6bbd..c43986dc 100644 --- 
a/src/swell/suites/convert_ncdiags/flow.cylc +++ b/src/swell/suites/convert_ncdiags/flow.cylc @@ -80,9 +80,9 @@ platform = {{platform}} execution time limit = {{scheduling["BuildJedi"]["execution_time_limit"]}} [[[directives]]] - {% for key, value in scheduling["BuildJedi"]["directives"]["all"].items() %} + {%- for key, value in scheduling["BuildJedi"]["directives"]["all"].items() %} --{{key}} = {{value}} - {% endfor %} + {%- endfor %} [[ GetGsiBc ]] script = "swell task GetGsiBc $config -d $datetime -m geos_atmosphere" diff --git a/src/swell/suites/convert_ncdiags/suite_questions.yaml b/src/swell/suites/convert_ncdiags/suite_questions.yaml deleted file mode 100644 index a5c1d9e8..00000000 --- a/src/swell/suites/convert_ncdiags/suite_questions.yaml +++ /dev/null @@ -1,32 +0,0 @@ -start_cycle_point: - ask_question: True - default_value: '2021-12-12T00:00:00Z' - prompt: What is the time of the first cycle (middle of the window)? - type: iso-datetime - -final_cycle_point: - ask_question: True - default_value: '2021-12-12T06:00:00Z' - prompt: What is the time of the final cycle (middle of the window)? - type: iso-datetime - -runahead_limit: - ask_question: False - default_value: 'P4' - prompt: Since this suite is non-cycling choose how many hours the workflow can run ahead? - type: string - -r2d2_local_path: - ask_question: False - default_value: defer_to_platform - prompt: Enter the path where R2D2 will store experiment output - type: string - -cycle_times: - ask_question: True - default_value: defer_to_model - options: defer_to_model - models: - - all - prompt: Enter the cycle times for this model. 
- type: string-check-list diff --git a/src/swell/suites/forecast_geos/flow.cylc b/src/swell/suites/forecast_geos/flow.cylc index 926ba8f8..cc8d74de 100644 --- a/src/swell/suites/forecast_geos/flow.cylc +++ b/src/swell/suites/forecast_geos/flow.cylc @@ -85,9 +85,9 @@ platform = {{platform}} execution time limit = {{scheduling["BuildGeos"]["execution_time_limit"]}} [[[directives]]] - {% for key, value in scheduling["BuildGeos"]["directives"]["all"].items() %} + {%- for key, value in scheduling["BuildGeos"]["directives"]["all"].items() %} --{{key}} = {{value}} - {% endfor %} + {%- endfor %} [[PrepGeosRunDir]] script = "swell task PrepGeosRunDir $config -d $datetime" @@ -109,8 +109,8 @@ platform = {{platform}} execution time limit = {{scheduling["RunGeosExecutable"]["execution_time_limit"]}} [[[directives]]] - {% for key, value in scheduling["RunGeosExecutable"]["directives"]["all"].items() %} + {%- for key, value in scheduling["RunGeosExecutable"]["directives"]["all"].items() %} --{{key}} = {{value}} - {% endfor %} + {%- endfor %} # -------------------------------------------------------------------------------------------------- diff --git a/src/swell/suites/forecast_geos/suite_questions.yaml b/src/swell/suites/forecast_geos/suite_questions.yaml deleted file mode 100644 index af75f762..00000000 --- a/src/swell/suites/forecast_geos/suite_questions.yaml +++ /dev/null @@ -1,32 +0,0 @@ -start_cycle_point: - ask_question: True - default_value: '2021-12-12T00:00:00Z' - prompt: What is the time of the first cycle (middle of the window)? - type: iso-datetime - -final_cycle_point: - ask_question: True - default_value: '2021-12-12T06:00:00Z' - prompt: What is the time of the final cycle (middle of the window)? 
- type: iso-datetime - -r2d2_local_path: - ask_question: False - default_value: defer_to_platform - prompt: Enter the path where R2D2 will store experiment output - type: string - -cycle_times: - ask_question: True - default_value: - - T00 - - T06 - - T12 - - T18 - options: - - T00 - - T06 - - T12 - - T18 - prompt: Enter the forecast times. - type: string-check-list diff --git a/src/swell/suites/geosadas/flow.cylc b/src/swell/suites/geosadas/flow.cylc index 670e2d1a..f96b3d58 100644 --- a/src/swell/suites/geosadas/flow.cylc +++ b/src/swell/suites/geosadas/flow.cylc @@ -111,9 +111,9 @@ platform = {{platform}} execution time limit = {{scheduling["RunJediVariationalExecutable"]["execution_time_limit"]}} [[[directives]]] - {% for key, value in scheduling["RunJediVariationalExecutable"]["directives"]["all"].items() %} + {%- for key, value in scheduling["RunJediVariationalExecutable"]["directives"]["all"].items() %} --{{key}} = {{value}} - {% endfor %} + {%- endfor %} [[CleanCycle]] script = "swell task CleanCycle $config -d $datetime -m geos_atmosphere" diff --git a/src/swell/suites/geosadas/suite_questions.yaml b/src/swell/suites/geosadas/suite_questions.yaml deleted file mode 100644 index 3e3090ee..00000000 --- a/src/swell/suites/geosadas/suite_questions.yaml +++ /dev/null @@ -1,26 +0,0 @@ -start_cycle_point: - ask_question: True - default_value: '2021-12-12T00:00:00Z' - prompt: What is the time of the first cycle (middle of the window)? - type: iso-datetime - -final_cycle_point: - ask_question: True - default_value: '2021-12-12T06:00:00Z' - prompt: What is the time of the final cycle (middle of the window)? - type: iso-datetime - -r2d2_local_path: - ask_question: False - default_value: defer_to_platform - prompt: Enter the path where R2D2 will store experiment output - type: string - -cycle_times: - ask_question: True - default_value: defer_to_model - options: defer_to_model - models: - - all - prompt: Enter the cycle times for this model. 
- type: string-check-list diff --git a/src/swell/suites/hofx/flow.cylc b/src/swell/suites/hofx/flow.cylc index e8c65216..88de6aca 100644 --- a/src/swell/suites/hofx/flow.cylc +++ b/src/swell/suites/hofx/flow.cylc @@ -112,9 +112,9 @@ platform = {{platform}} execution time limit = {{scheduling["BuildJedi"]["execution_time_limit"]}} [[[directives]]] - {% for key, value in scheduling["BuildJedi"]["directives"]["all"].items() %} + {%- for key, value in scheduling["BuildJedi"]["directives"]["all"].items() %} --{{key}} = {{value}} - {% endfor %} + {%- endfor %} {% for model_component in model_components %} @@ -141,18 +141,18 @@ platform = {{platform}} execution time limit = {{scheduling["RunJediHofxExecutable"]["execution_time_limit"]}} [[[directives]]] - {% for key, value in scheduling["RunJediHofxExecutable"]["directives"][model_component].items() %} + {%- for key, value in scheduling["RunJediHofxExecutable"]["directives"][model_component].items() %} --{{key}} = {{value}} - {% endfor %} + {%- endfor %} [[EvaObservations-{{model_component}}]] script = "swell task EvaObservations $config -d $datetime -m {{model_component}}" platform = {{platform}} execution time limit = {{scheduling["EvaObservations"]["execution_time_limit"]}} [[[directives]]] - {% for key, value in scheduling["EvaObservations"]["directives"][model_component].items() %} + {%- for key, value in scheduling["EvaObservations"]["directives"][model_component].items() %} --{{key}} = {{value}} - {% endfor %} + {%- endfor %} [[SaveObsDiags-{{model_component}}]] script = "swell task SaveObsDiags $config -d $datetime -m {{model_component}}" diff --git a/src/swell/suites/hofx/suite_questions.yaml b/src/swell/suites/hofx/suite_questions.yaml deleted file mode 100644 index 71c889f6..00000000 --- a/src/swell/suites/hofx/suite_questions.yaml +++ /dev/null @@ -1,32 +0,0 @@ -start_cycle_point: - ask_question: True - default_value: '2021-12-12T00:00:00Z' - prompt: What is the time of the first cycle (middle of the window)? 
- type: iso-datetime - -final_cycle_point: - ask_question: True - default_value: '2021-12-12T06:00:00Z' - prompt: What is the time of the final cycle (middle of the window)? - type: iso-datetime - -runahead_limit: - ask_question: True - default_value: 'P4' - prompt: Since this suite is non-cycling choose how many hours the workflow can run ahead? - type: string - -r2d2_local_path: - ask_question: False - default_value: defer_to_platform - prompt: Enter the path where R2D2 will store experiment output - type: string - -cycle_times: - ask_question: True - default_value: defer_to_model - options: defer_to_model - models: - - all - prompt: Enter the cycle times for this model. - type: string-check-list diff --git a/src/swell/suites/localensembleda/flow.cylc b/src/swell/suites/localensembleda/flow.cylc index d5da3f70..0d4256bf 100644 --- a/src/swell/suites/localensembleda/flow.cylc +++ b/src/swell/suites/localensembleda/flow.cylc @@ -133,9 +133,9 @@ platform = {{platform}} execution time limit = {{scheduling["BuildJedi"]["execution_time_limit"]}} [[[directives]]] - {% for key, value in scheduling["BuildJedi"]["directives"]["all"].items() %} + {%- for key, value in scheduling["BuildJedi"]["directives"]["all"].items() %} --{{key}} = {{value}} - {% endfor %} + {%- endfor %} {% for model_component in model_components %} @@ -176,18 +176,18 @@ platform = {{platform}} execution time limit = {{scheduling["RunJediLocalEnsembleDaExecutable"]["execution_time_limit"]}} [[[directives]]] - {% for key, value in scheduling["RunJediLocalEnsembleDaExecutable"]["directives"][model_component].items() %} + {%- for key, value in scheduling["RunJediLocalEnsembleDaExecutable"]["directives"][model_component].items() %} --{{key}} = {{value}} - {% endfor %} + {%- endfor %} [[EvaObservations-{{model_component}}]] script = "swell task EvaObservations $config -d $datetime -m {{model_component}}" platform = {{platform}} execution time limit = {{scheduling["EvaObservations"]["execution_time_limit"]}} 
[[[directives]]] - {% for key, value in scheduling["EvaObservations"]["directives"][model_component].items() %} + {%- for key, value in scheduling["EvaObservations"]["directives"][model_component].items() %} --{{key}} = {{value}} - {% endfor %} + {%- endfor %} [[SaveObsDiags-{{model_component}}]] script = "swell task SaveObsDiags $config -d $datetime -m {{model_component}}" diff --git a/src/swell/suites/localensembleda/suite_questions.yaml b/src/swell/suites/localensembleda/suite_questions.yaml deleted file mode 100644 index 3b6376bd..00000000 --- a/src/swell/suites/localensembleda/suite_questions.yaml +++ /dev/null @@ -1,32 +0,0 @@ -start_cycle_point: - ask_question: True - default_value: '2021-12-12T00:00:00Z' - prompt: What is the time of the first cycle (middle of the window)? - type: iso-datetime - -final_cycle_point: - ask_question: True - default_value: '2021-12-12T06:00:00Z' - prompt: What is the time of the final cycle (middle of the window)? - type: iso-datetime - -runahead_limit: - ask_question: True - default_value: 'P4' - prompt: Since this suite is non-cycling choose how many hours the workflow can run ahead? - type: string - -cycle_times: - ask_question: True - default_value: defer_to_model - options: defer_to_model - models: - - all - prompt: Enter the cycle times for this model. 
- type: string-check-list - -r2d2_local_path: - ask_question: False - default_value: defer_to_platform - prompt: Enter the path where R2D2 will store experiment output - type: string diff --git a/src/swell/suites/suite_questions.yaml b/src/swell/suites/suite_questions.yaml new file mode 100644 index 00000000..3ede7d91 --- /dev/null +++ b/src/swell/suites/suite_questions.yaml @@ -0,0 +1,128 @@ +cycle_times: + ask_question: True + default_value: defer_to_model + options: defer_to_model + models: + - all + suites: + - 3dfgat_atmos + - 3dvar_atmos + - 3dvar_cycle + - 3dvar + - convert_ncdiags + - forecast_geos + - hofx + - localensembleda + - ufo_testing + prompt: Enter the cycle times for this model. + type: string-check-list + +ensemble_hofx_strategy: + ask_question: False + default_value: 'defer_to_model' + models: + - all + suites: + - localensembleda + prompt: Enter the ensemble hofx strategy. + type: string + +ensemble_hofx_packets: + ask_question: False + default_value: 'defer_to_model' + models: + - all + suites: + - localensembleda + prompt: Enter the number of ensemble packets. + type: string + +experiment_id: + ask_question: True + default_value: defer_to_code + prompt: What is the experiment id? + suites: + - all + type: string + +experiment_root: + ask_question: True + default_value: defer_to_platform + prompt: What is the experiment root (the directory where the experiment will be stored)? + suites: + - all + type: string + +start_cycle_point: + ask_question: True + default_value: '2021-12-12T00:00:00Z' + prompt: What is the time of the first cycle (middle of the window)? + suites: + - 3dfgat_atmos + - 3dvar_atmos + - 3dvar_cycle + - 3dvar + - convert_ncdiags + - forecast_geos + - hofx + - localensembleda + - ufo_testing + type: iso-datetime + +final_cycle_point: + ask_question: True + default_value: '2021-12-12T06:00:00Z' + prompt: What is the time of the final cycle (middle of the window)? 
+ suites: + - 3dfgat_atmos + - 3dvar_atmos + - 3dvar_cycle + - 3dvar + - convert_ncdiags + - forecast_geos + - hofx + - localensembleda + - ufo_testing + type: iso-datetime + +runahead_limit: + ask_question: True + default_value: 'P4' + prompt: Since this suite is non-cycling choose how many hours the workflow can run ahead? + suites: + - 3dfgat_atmos + - 3dvar_atmos + - 3dvar + - 3dvar_cycle + - convert_ncdiags + - hofx + - localensembleda + - ufo_testing + type: string + +model_components: + ask_question: True + default_value: defer_to_code + options: defer_to_code + suites: + - 3dfgat_atmos + - 3dvar_atmos + - 3dvar_cycle + - 3dvar + - convert_ncdiags + - hofx + - localensembleda + - ufo_testing + prompt: Enter the model components for this model. + type: string-check-list + +window_type: + ask_question: False + default_value: defer_to_model + options: ['3D', '4D'] + models: + - all + suites: + - hofx + prompt: Enter the window type for this model. + type: string-drop-list diff --git a/src/swell/suites/ufo_testing/flow.cylc b/src/swell/suites/ufo_testing/flow.cylc index 1e6a1552..66e8fb88 100644 --- a/src/swell/suites/ufo_testing/flow.cylc +++ b/src/swell/suites/ufo_testing/flow.cylc @@ -98,9 +98,9 @@ platform = {{platform}} execution time limit = {{scheduling["BuildJedi"]["execution_time_limit"]}} [[[directives]]] - {% for key, value in scheduling["BuildJedi"]["directives"]["all"].items() %} + {%- for key, value in scheduling["BuildJedi"]["directives"]["all"].items() %} --{{key}} = {{value}} - {% endfor %} + {%- endfor %} [[CloneGeosMksi]] script = "swell task CloneGeosMksi $config -m geos_atmosphere" @@ -128,18 +128,18 @@ platform = {{platform}} execution time limit = {{scheduling["RunJediUfoTestsExecutable"]["execution_time_limit"]}} [[[directives]]] - {% for key, value in scheduling["RunJediUfoTestsExecutable"]["directives"]["all"].items() %} + {%- for key, value in scheduling["RunJediUfoTestsExecutable"]["directives"]["all"].items() %} --{{key}} = {{value}} 
- {% endfor %} + {%- endfor %} [[EvaObservations]] script = "swell task EvaObservations $config -d $datetime -m geos_atmosphere" platform = {{platform}} execution time limit = {{scheduling["EvaObservations"]["execution_time_limit"]}} [[[directives]]] - {% for key, value in scheduling["EvaObservations"]["directives"]["all"].items() %} + {%- for key, value in scheduling["EvaObservations"]["directives"]["all"].items() %} --{{key}} = {{value}} - {% endfor %} + {%- endfor %} [[CleanCycle]] script = "swell task CleanCycle $config -d $datetime -m geos_atmosphere" diff --git a/src/swell/suites/ufo_testing/suite_questions.yaml b/src/swell/suites/ufo_testing/suite_questions.yaml deleted file mode 100644 index 71c889f6..00000000 --- a/src/swell/suites/ufo_testing/suite_questions.yaml +++ /dev/null @@ -1,32 +0,0 @@ -start_cycle_point: - ask_question: True - default_value: '2021-12-12T00:00:00Z' - prompt: What is the time of the first cycle (middle of the window)? - type: iso-datetime - -final_cycle_point: - ask_question: True - default_value: '2021-12-12T06:00:00Z' - prompt: What is the time of the final cycle (middle of the window)? - type: iso-datetime - -runahead_limit: - ask_question: True - default_value: 'P4' - prompt: Since this suite is non-cycling choose how many hours the workflow can run ahead? - type: string - -r2d2_local_path: - ask_question: False - default_value: defer_to_platform - prompt: Enter the path where R2D2 will store experiment output - type: string - -cycle_times: - ask_question: True - default_value: defer_to_model - options: defer_to_model - models: - - all - prompt: Enter the cycle times for this model. 
- type: string-check-list diff --git a/src/swell/swell.py b/src/swell/swell.py index c9efa40b..4de4b564 100644 --- a/src/swell/swell.py +++ b/src/swell/swell.py @@ -12,7 +12,6 @@ from swell.deployment.platforms.platforms import get_platforms from swell.deployment.create_experiment import clone_config, create_experiment_directory -from swell.deployment.create_experiment import prepare_config from swell.deployment.launch_experiment import launch_experiment from swell.tasks.base.task_base import task_wrapper, get_tasks from swell.test.test_driver import test_wrapper, valid_tests @@ -104,11 +103,8 @@ def create(suite, input_method, platform, override, advanced, slurm): suite (str): Name of the suite you wish to run. \n """ - # First create the configuration for the experiment. - experiment_dict_str = prepare_config(suite, input_method, platform, override, advanced, slurm) - # Create the experiment directory - create_experiment_directory(experiment_dict_str) + create_experiment_directory(suite, input_method, platform, override, advanced, slurm) # -------------------------------------------------------------------------------------------------- diff --git a/src/swell/tasks/get_background.py b/src/swell/tasks/get_background.py index a7196ee7..2a812571 100644 --- a/src/swell/tasks/get_background.py +++ b/src/swell/tasks/get_background.py @@ -9,6 +9,7 @@ from swell.tasks.base.task_base import taskBase +from swell.utilities.r2d2 import create_r2d2_config import isodate import os @@ -49,6 +50,7 @@ def execute(self): window_length = self.config.window_length() window_offset = self.config.window_offset() window_type = self.config.window_type() + r2d2_local_path = self.config.r2d2_local_path() # Get window parameters local_background_time = self.da_window_params.local_background_time(window_offset, @@ -57,6 +59,10 @@ def execute(self): # Add to jedi config rendering dictionary self.jedi_rendering.add_key('local_background_time', local_background_time) + # Set R2D2 config file + 
# -------------------- + create_r2d2_config(self.logger, self.platform(), self.cycle_dir(), r2d2_local_path) + # Convert to datetime durations # ----------------------------- window_length_dur = isodate.parse_duration(window_length) diff --git a/src/swell/tasks/get_geovals.py b/src/swell/tasks/get_geovals.py index e215852c..d4b63e94 100644 --- a/src/swell/tasks/get_geovals.py +++ b/src/swell/tasks/get_geovals.py @@ -11,6 +11,7 @@ import os from swell.tasks.base.task_base import taskBase +from swell.utilities.r2d2 import create_r2d2_config from r2d2 import fetch @@ -29,12 +30,17 @@ def execute(self): observations = self.config.observations() window_length = self.config.window_length() crtm_coeff_dir = self.config.crtm_coeff_dir(None) + r2d2_local_path = self.config.r2d2_local_path() # Get window begin time window_begin = self.da_window_params.window_begin(window_offset) background_time = self.da_window_params.background_time(window_offset, background_time_offset) + # Set R2D2 config file + # -------------------- + create_r2d2_config(self.logger, self.platform(), self.cycle_dir(), r2d2_local_path) + # Add to JEDI template rendering dictionary self.jedi_rendering.add_key('background_time', background_time) self.jedi_rendering.add_key('crtm_coeff_dir', crtm_coeff_dir) diff --git a/src/swell/tasks/get_observations.py b/src/swell/tasks/get_observations.py index 853f0fdb..fb023ca6 100644 --- a/src/swell/tasks/get_observations.py +++ b/src/swell/tasks/get_observations.py @@ -13,6 +13,7 @@ from datetime import timedelta, datetime as dt from swell.tasks.base.task_base import taskBase +from swell.utilities.r2d2 import create_r2d2_config from swell.utilities.datetime import datetime_formats from r2d2 import fetch @@ -96,6 +97,7 @@ def execute(self): window_length = self.config.window_length() crtm_coeff_dir = self.config.crtm_coeff_dir(None) window_offset = self.config.window_offset() + r2d2_local_path = self.config.r2d2_local_path() # Set the observing system records path 
self.jedi_rendering.set_obs_records_path(self.config.observing_system_records_path(None)) @@ -123,6 +125,10 @@ def execute(self): self.jedi_rendering.add_key('crtm_coeff_dir', crtm_coeff_dir) self.jedi_rendering.add_key('window_begin', window_begin) + # Set R2D2 config file + # -------------------- + create_r2d2_config(self.logger, self.platform(), self.cycle_dir(), r2d2_local_path) + # Loop over observation operators # ------------------------------- for observation in observations: diff --git a/src/swell/tasks/save_obs_diags.py b/src/swell/tasks/save_obs_diags.py index c2401ee0..02947311 100644 --- a/src/swell/tasks/save_obs_diags.py +++ b/src/swell/tasks/save_obs_diags.py @@ -10,6 +10,7 @@ import os from swell.tasks.base.task_base import taskBase from r2d2 import store +from swell.utilities.r2d2 import create_r2d2_config from swell.utilities.run_jedi_executables import check_obs # -------------------------------------------------------------------------------------------------- @@ -29,6 +30,7 @@ def execute(self): crtm_coeff_dir = self.config.crtm_coeff_dir(None) observations = self.config.observations() window_offset = self.config.window_offset() + r2d2_local_path = self.config.r2d2_local_path() # Set the observing system records path self.jedi_rendering.set_obs_records_path(self.config.observing_system_records_path(None)) @@ -43,6 +45,10 @@ def execute(self): self.jedi_rendering.add_key('crtm_coeff_dir', crtm_coeff_dir) self.jedi_rendering.add_key('window_begin', window_begin) + # Set R2D2 config file + # -------------------- + create_r2d2_config(self.logger, self.platform(), self.cycle_dir(), r2d2_local_path) + # Loop over observation operators # ------------------------------- for observation in observations: diff --git a/src/swell/tasks/stage_jedi.py b/src/swell/tasks/stage_jedi.py index 47b2dad4..e36d6f93 100644 --- a/src/swell/tasks/stage_jedi.py +++ b/src/swell/tasks/stage_jedi.py @@ -33,7 +33,7 @@ def execute(self): horizontal_resolution = 
self.config.horizontal_resolution() swell_static_files = self.config.swell_static_files() vertical_resolution = self.config.vertical_resolution() - gsibec_configuration = self.config.gsibec_configuration() + gsibec_configuration = self.config.gsibec_configuration(None) # Add jedi interface template keys self.jedi_rendering.add_key('horizontal_resolution', horizontal_resolution) diff --git a/src/swell/tasks/store_background.py b/src/swell/tasks/store_background.py index f6e76f8f..b584ddde 100644 --- a/src/swell/tasks/store_background.py +++ b/src/swell/tasks/store_background.py @@ -16,6 +16,7 @@ from swell.tasks.base.task_base import taskBase from swell.utilities.datetime import datetime_formats +from swell.utilities.r2d2 import create_r2d2_config # -------------------------------------------------------------------------------------------------- @@ -47,6 +48,7 @@ def execute(self): window_offset = self.config.window_offset() background_experiment = self.config.background_experiment() background_frequency = self.config.background_frequency() + r2d2_local_path = self.config.r2d2_local_path() # Position relative to center of the window where forecast starts forecast_offset = self.config.analysis_forecast_window_offset() @@ -56,6 +58,10 @@ def execute(self): window_offset_dur = isodate.parse_duration(window_offset) forecast_offset_dur = isodate.parse_duration(forecast_offset) + # Set R2D2 config file + # -------------------- + create_r2d2_config(self.logger, self.platform(), self.cycle_dir(), r2d2_local_path) + # Depending on window type get the time of the background if window_type == "3D": # Single background at the middle of the window diff --git a/src/swell/tasks/task_questions.yaml b/src/swell/tasks/task_questions.yaml index 19c5d8d0..a9fba6cb 100644 --- a/src/swell/tasks/task_questions.yaml +++ b/src/swell/tasks/task_questions.yaml @@ -10,7 +10,7 @@ analysis_forecast_window_offset: - MoveDaRestart - PrepareAnalysis - StoreBackground - type: iso-duration + type: 
string-check-list analysis_variables: ask_question: false @@ -116,7 +116,7 @@ clean_patterns: prompt: Provide a list of patterns that you wish to remove from the cycle directory. tasks: - CleanCycle - type: string-list + type: string-check-list crtm_coeff_dir: ask_question: false @@ -221,8 +221,6 @@ existing_jedi_source_directory: forecast_duration: ask_question: true default_value: PT12H - models: - - all prompt: GEOS forecast duration tasks: - MoveForecastRestart @@ -287,8 +285,6 @@ geos_build_method: geos_experiment_directory: ask_question: true default_value: defer_to_platform - models: - - all prompt: What is the path to the GEOS restarts directory? tasks: - PrepGeosRunDir @@ -305,8 +301,6 @@ geos_gcm_tag: geos_restarts_directory: ask_question: true default_value: defer_to_platform - models: - - all prompt: What is the path to the GEOS restarts directory? tasks: - GetGeosRestart @@ -340,18 +334,18 @@ gradient_norm_reduction: prompt: What value of gradient norm reduction for convergence? tasks: - RunJediVariationalExecutable - type: string-drop-list + type: string gsibec_configuration: ask_question: false default_value: defer_to_model models: - - all + - geos_atmosphere prompt: Which GSIBEC climatological or hybrid? tasks: - RunJediVariationalExecutable - StageJedi - type: float + type: string horizontal_localization_lengthscale: ask_question: false @@ -737,6 +731,18 @@ produce_geovals: - GsiNcdiagToIoda type: boolean +r2d2_local_path: + ask_question: false + default_value: defer_to_platform + prompt: What is the path to the R2D2 local directory? 
+ tasks: + - GetBackground + - GetGeovals + - GetObservations + - SaveObsDiags + - StoreBackground + type: string + save_geovals: ask_question: false default_value: false diff --git a/src/swell/test/suite_tests/3dfgat_atmos-tier1.yaml b/src/swell/test/suite_tests/3dfgat_atmos-tier1.yaml index 951da11a..23f4f985 100644 --- a/src/swell/test/suite_tests/3dfgat_atmos-tier1.yaml +++ b/src/swell/test/suite_tests/3dfgat_atmos-tier1.yaml @@ -1,5 +1,6 @@ jedi_build_method: use_existing model_components: ['geos_atmosphere'] +runahead_limit: 'P3' models: geos_atmosphere: cycle_times: diff --git a/src/swell/test/suite_tests/3dvar_atmos-tier1.yaml b/src/swell/test/suite_tests/3dvar_atmos-tier1.yaml index 652f9dba..1ccbe632 100644 --- a/src/swell/test/suite_tests/3dvar_atmos-tier1.yaml +++ b/src/swell/test/suite_tests/3dvar_atmos-tier1.yaml @@ -1,5 +1,6 @@ start_cycle_point: '2021-12-12T00:00:00Z' final_cycle_point: '2021-12-12T06:00:00Z' +runahead_limit: 'P3' jedi_build_method: use_existing model_components: ['geos_atmosphere'] models: diff --git a/src/swell/test/suite_tests/3dvar_cycle-tier1.yaml b/src/swell/test/suite_tests/3dvar_cycle-tier1.yaml index fa214b15..bcc63f38 100644 --- a/src/swell/test/suite_tests/3dvar_cycle-tier1.yaml +++ b/src/swell/test/suite_tests/3dvar_cycle-tier1.yaml @@ -1,5 +1,6 @@ start_cycle_point: '2021-06-01T12:00:00Z' final_cycle_point: '2021-06-02T00:00:00Z' +runahead_limit: P2 jedi_build_method: use_existing geos_build_method: use_existing model_components: ['geos_ocean'] diff --git a/src/swell/test/suite_tests/forecast_geos-tier1.yaml b/src/swell/test/suite_tests/forecast_geos-tier1.yaml index 8bfe3c50..28574b06 100644 --- a/src/swell/test/suite_tests/forecast_geos-tier1.yaml +++ b/src/swell/test/suite_tests/forecast_geos-tier1.yaml @@ -1,4 +1,5 @@ start_cycle_point: '2021-06-20T00:00:00Z' final_cycle_point: '2021-06-21T00:00:00Z' +cycle_times: ['T00', 'T06', 'T12', 'T18'] geos_build_method: use_existing forecast_duration: PT6H diff --git 
a/src/swell/test/suite_tests/hofx-tier1.yaml b/src/swell/test/suite_tests/hofx-tier1.yaml index 9f0e5fff..199fcd73 100644 --- a/src/swell/test/suite_tests/hofx-tier1.yaml +++ b/src/swell/test/suite_tests/hofx-tier1.yaml @@ -1,5 +1,6 @@ jedi_build_method: use_existing save_geovals: true +model_components: ['geos_atmosphere', 'geos_ocean'] models: geos_atmosphere: horizontal_resolution: '91' diff --git a/src/swell/test/suite_tests/localensembleda-tier1.yaml b/src/swell/test/suite_tests/localensembleda-tier1.yaml index ab6425ad..6eceb305 100644 --- a/src/swell/test/suite_tests/localensembleda-tier1.yaml +++ b/src/swell/test/suite_tests/localensembleda-tier1.yaml @@ -22,3 +22,4 @@ models: - 'geos.mem*.nc4' - '*log*' - '*.txt' + window_type: 3D diff --git a/src/swell/utilities/dictionary.py b/src/swell/utilities/dictionary.py index 7bf41a06..e5d28826 100644 --- a/src/swell/utilities/dictionary.py +++ b/src/swell/utilities/dictionary.py @@ -60,8 +60,7 @@ def remove_matching_keys(d, key): # -------------------------------------------------------------------------------------------------- - -def add_comments_to_dictionary(dictionary_string, comment_dictionary): +def add_comments_to_dictionary(logger, dictionary_string, comment_dictionary): dict_str_items = dictionary_string.split('\n') diff --git a/src/swell/utilities/jinja2.py b/src/swell/utilities/jinja2.py index fee239c3..1403d3e5 100644 --- a/src/swell/utilities/jinja2.py +++ b/src/swell/utilities/jinja2.py @@ -7,28 +7,91 @@ # -------------------------------------------------------------------------------------------------- -import jinja2 +import jinja2 as j2 +from swell.utilities.logger import Logger # -------------------------------------------------------------------------------------------------- -def template_string_jinja2(logger, templated_string, dictionary_of_templates): +class SilentUndefined(j2.Undefined): + """ + A custom undefined class that doesn't raise errors when variables are missing and returns the + 
original template variable placeholder. - # Load the templated string - t = jinja2.Template(templated_string, trim_blocks=True, lstrip_blocks=True, - undefined=jinja2.StrictUndefined) + In order to identify which tasks are used and to define questions for the CLI + configuration method, two Jinja2 passes occur on each suite's flow.cylc files + where "swell task" commands are defined. By design, first pass leaves most of + the templates as is (non-exhaustive). Hence, this class ensures that we ignore + the exceptions defined here, silently. - # Render the templates using the dictionary - string_rendered = t.render(dictionary_of_templates) + See `ask_questions_and_configure_suite` method in `prepare_config_and_suite.py` + for more details on Jinja2 passes. + """ + def __getattr__(self, name): + # Return a new SilentUndefined instance but append the attribute access to the name. + return SilentUndefined(name=f"{self._undefined_name}.{name}") - logger.assert_abort('{{' not in string_rendered, f'In template_string_jinja2 ' + - f'the output string still contains template directives. ' + - f'{string_rendered}') + def __getitem__(self, key): + # Similar to __getattr__, return a new instance with the key access incorporated. + if isinstance(key, str): + return SilentUndefined(name=f"{self._undefined_name}['{key}']") + return SilentUndefined(name=f"{self._undefined_name}[{key}]") - logger.assert_abort('}}' not in string_rendered, f'In template_string_jinja2 ' + - f'the output string still contains template directives. ' + - f'{string_rendered}') + def items(self): + # Return an empty list when items method is called. + return [] + + def __str__(self): + # Ensure the name returned reflects the original template placeholder. 
+ return f"{{{{ {self._undefined_name} }}}}" + + def __repr__(self): + return str(self) + + +# -------------------------------------------------------------------------------------------------- + + +def template_string_jinja2( + logger: Logger, + templated_string: str, + dictionary_of_templates: dict, + allow_unresolved: bool = False +) -> str: + + # Handling of templates that cannot be resolved + # --------------------------------------------- + undefined = SilentUndefined if allow_unresolved else j2.StrictUndefined + + # Create the Jinja2 environment + # ----------------------------- + env = j2.Environment(undefined=undefined) + + # Load the algorithm template + # --------------------------- + template = env.from_string(templated_string) + + # Render the template hierarchy + # ----------------------------- + try: + string_rendered = template.render(dictionary_of_templates) + except j2.exceptions.UndefinedError as e: + logger.abort('Resolving templates for templated_string failed with the following ' + + f'exception: {e}') + + # Extra safety checks + # ------------------- + if not allow_unresolved: + logger.assert_abort( + not (('{{' in string_rendered) or ('}}' in string_rendered)), + f""" + In template_string_jinja2, the output string still contains template directives: + ''' + {string_rendered} + ''' + """ + ) return string_rendered diff --git a/src/swell/utilities/r2d2.py b/src/swell/utilities/r2d2.py new file mode 100644 index 00000000..d29bce30 --- /dev/null +++ b/src/swell/utilities/r2d2.py @@ -0,0 +1,58 @@ +# (C) Copyright 2021- United States Government as represented by the Administrator of the +# National Aeronautics and Space Administration. All Rights Reserved. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. 
+ +# -------------------------------------------------------------------------------------------------- + + +import os + +from swell.swell_path import get_swell_path +from swell.utilities.jinja2 import template_string_jinja2 +from swell.utilities.logger import Logger + +# -------------------------------------------------------------------------------------------------- + + +def create_r2d2_config( + logger: Logger, + platform: str, + cycle_dir: str, + r2d2_local_path: str +) -> None: + + # R2D2 config file that will be created + r2d2_config_file = os.path.join(cycle_dir, 'r2d2_config.yaml') + + # Set the environment variable R2D2_CONFIG + os.environ["R2D2_CONFIG"] = r2d2_config_file + + # If the file already exists then return + if os.path.isfile(r2d2_config_file): + return + + # Read R2D2 config file template that will be read + r2d2_config_file_template = os.path.join(get_swell_path(), 'deployment', 'platforms', platform, + 'r2d2_config.yaml') + + with open(r2d2_config_file_template, 'r') as f: + r2d2_config_file_template_str = f.read() + + # Create a dictionary containing r2d2_local_path + r2d2_config_dict = {'r2d2_local_path': r2d2_local_path} + + # Replace the template with the dictionary + r2d2_config_file_template_str = template_string_jinja2(logger, r2d2_config_file_template_str, + r2d2_config_dict) + + # Expand environment variables in templated file + r2d2_config_file_template_str = os.path.expandvars(r2d2_config_file_template_str) + + # Write the config file + with open(r2d2_config_file, 'w') as f: + f.write(r2d2_config_file_template_str) + + +# ----------------------------------------------------------------------------------------------