From bf683f500740c5b728a37ba0185ef6645813e1cf Mon Sep 17 00:00:00 2001
From: Hilary James Oliver
Date: Thu, 9 Sep 2021 10:45:10 +1200
Subject: [PATCH 01/14] Integer flow labels.

---
 CHANGES.md | 6 +
 cylc/flow/cfgspec/globalcfg.py | 15 +-
 cylc/flow/data_messages.proto | 3 +-
 cylc/flow/data_messages_pb2.py | 109 ++--
 cylc/flow/data_store_mgr.py | 23 +-
 cylc/flow/flow_mgr.py | 72 +++
 cylc/flow/job_file.py | 4 +-
 cylc/flow/main_loop/prune_flow_labels.py | 24 -
 cylc/flow/network/resolvers.py | 38 +-
 cylc/flow/network/schema.py | 8 +-
 cylc/flow/pathutil.py | 22 +-
 cylc/flow/rundb.py | 41 +-
 cylc/flow/scheduler.py | 54 +-
 cylc/flow/scheduler_cli.py | 4 +-
 cylc/flow/scripts/dump.py | 10 +-
 cylc/flow/scripts/install.py | 14 +-
 cylc/flow/scripts/set_outputs.py | 24 +-
 cylc/flow/scripts/stop.py | 18 +-
 cylc/flow/scripts/trigger.py | 20 +-
 cylc/flow/scripts/validate.py | 4 +-
 cylc/flow/task_events_mgr.py | 129 +++--
 cylc/flow/task_job_mgr.py | 31 +-
 cylc/flow/task_pool.py | 488 +++++++-----------
 cylc/flow/task_proxy.py | 54 +-
 cylc/flow/task_state.py | 19 +-
 cylc/flow/workflow_db_mgr.py | 26 +-
 cylc/flow/workflow_files.py | 59 +--
 setup.cfg | 1 -
 .../cylc-poll/03-poll-all/flow.cylc | 4 +-
 .../cylc-poll/16-execution-time-limit.t | 9 +-
 tests/flakyfunctional/database/00-simple.t | 1 +
 .../database/00-simple/schema.out | 5 +-
 .../flakyfunctional/events/01-task/events.log | 2 +-
 .../hold-release/14-hold-kill/flow.cylc | 4 +-
 .../hold-release/15-hold-after/flow.cylc | 3 +-
 .../functional/authentication/00-shared-fs.t | 4 +-
 .../functional/broadcast/07-timeout/flow.cylc | 5 +-
 .../functional/cylc-cat-log/05-remote-tail.t | 2 +-
 tests/functional/cylc-message/02-multi.t | 17 +-
 .../05-poll-multi-messages/flow.cylc | 6 +-
 tests/functional/cylc-poll/13-comm-method.t | 4 +-
 tests/functional/cylc-poll/14-intervals.t | 4 +-
 .../cylc-poll/15-job-st-file-no-batch.t | 6 +-
 .../cylc-remove/00-simple/flow.cylc | 2 +-
 .../cylc-remove/02-cycling/flow.cylc | 4 +-
 .../cylc-trigger/02-filter-failed/flow.cylc | 6 +-
 .../cylc-trigger/04-filter-names/flow.cylc | 10 +-
 .../cylc-trigger/05-filter-cycles/flow.cylc | 4 +-
 .../23-workflow-stalled-handler/flow.cylc | 2 +-
 .../events/38-task-event-handler-custom.t | 6 +-
 .../ext-trigger/01-no-nudge/flow.cylc | 2 +-
 .../hold-release/02-hold-on-spawn.t | 2 +-
 tests/functional/hold-release/05-release.t | 3 +-
 tests/functional/hold-release/08-hold.t | 16 +-
 .../hold-release/11-retrying/flow.cylc | 25 +-
 .../18-hold-cycle-globs/flow.cylc | 6 +-
 .../19-no-reset-prereq-on-waiting/flow.cylc | 2 +-
 .../intelligent-host-selection/02-badhosts.t | 6 +-
 .../01-job-nn-localhost/db.sqlite3 | 10 +-
 .../job-submission/19-platform_select.t | 6 +-
 .../logging/02-duplicates/flow.cylc | 2 +-
 .../12-pause-then-retry/flow.cylc | 24 +-
 .../pre-initial/warm-insert/flow.cylc | 4 +-
 tests/functional/reload/11-retrying/flow.cylc | 2 +-
 tests/functional/reload/14-waiting/flow.cylc | 2 +-
 tests/functional/reload/17-graphing-change.t | 5 +-
 .../reload/19-remote-kill/flow.cylc | 2 +-
 tests/functional/reload/runahead/flow.cylc | 2 +-
 tests/functional/remote/05-remote-init.t | 9 +-
 tests/functional/remote/06-poll.t | 4 +-
 .../restart/08-stop-after-cycle-point.t | 1 -
 .../08-stop-after-cycle-point/reference.log | 4 +
 tests/functional/restart/22-hold/flow.cylc | 2 +-
 .../restart/53-task-prerequisites/flow.cylc | 8 +-
 tests/functional/runahead/06-release-update.t | 2 +-
 .../runahead/default-future/flow.cylc | 2 +-
 tests/functional/spawn-on-demand/01-reflow.t | 2 +-
 .../spawn-on-demand/01-reflow/flow.cylc | 2 +-
 tests/functional/spawn-on-demand/02-merge.t | 73 +--
 .../spawn-on-demand/02-merge/flow.cylc | 39 +-
 .../spawn-on-demand/02-merge/reference.log | 5 -
 .../spawn-on-demand/04-branch/flow.cylc | 2 +-
 .../spawn-on-demand/05-stop-flow/flow.cylc | 5 +-
 .../spawn-on-demand/06-stop-flow-2.t | 2 +-
 .../spawn-on-demand/06-stop-flow-2/flow.cylc | 9 +-
 .../spawn-on-demand/07-abs-triggers/flow.cylc | 2 +-
 .../spawn-on-demand/09-set-outputs/flow.cylc | 10 +-
 .../spawn-on-demand/10-retrigger/flow.cylc | 2 +-
 tests/functional/startup/00-state-summary.t | 2 +-
 .../triggering/19-and-suicide/flow.cylc | 2 +-
 tests/functional/xtriggers/03-sequence.t | 2 +-
 tests/integration/test_data_store_mgr.py | 2 +-
 tests/integration/test_resolvers.py | 3 +-
 tests/unit/test_job_file.py | 6 +-
 tests/unit/test_xtrigger_mgr.py | 13 +-
 95 files changed, 888 insertions(+), 878 deletions(-)
 create mode 100644 cylc/flow/flow_mgr.py
 delete mode 100644 cylc/flow/main_loop/prune_flow_labels.py
 create mode 100644 tests/functional/restart/08-stop-after-cycle-point/reference.log

diff --git a/CHANGES.md b/CHANGES.md
index 1e6ceb275dc..47e35465578 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -169,6 +169,9 @@ Third beta release of Cylc 8.
 [#4286](https://github.com/cylc/cylc-flow/pull/4286) - Add an option for
 displaying source workflows in `cylc scan`.
 
+[#4300](https://github.com/cylc/cylc-flow/pull/4300) - Integer flow labels with
+flow metadata, and improved task logging.
+
 [#4291](https://github.com/cylc/cylc-flow/pull/4291) - Remove obsolete
 `cylc edit` and `cylc search` commands.
 
diff --git a/cylc/flow/cfgspec/globalcfg.py b/cylc/flow/cfgspec/globalcfg.py
index fd6ce81550f..6e52b07afc4 100644
--- a/cylc/flow/cfgspec/globalcfg.py
+++ b/cylc/flow/cfgspec/globalcfg.py
@@ -471,7 +471,7 @@
             Configuration of the Cylc Scheduler's main loop.
         '''):
             Conf('plugins', VDR.V_STRING_LIST,
-                 ['health check', 'prune flow labels', 'reset bad hosts'],
+                 ['health check', 'reset bad hosts'],
                  desc='''
                 Configure the default main loop plugins to use when
                 starting new workflows.
@@ -491,13 +491,6 @@
                     The interval with which this plugin is run.
                 ''')
 
-            with Conf('prune flow labels', meta=MainLoopPlugin, desc='''
-                Prune redundant flow labels.
-            '''):
-                Conf('interval', VDR.V_INTERVAL, DurationFloat(600), desc='''
-                    The interval with which this plugin is run.
-                ''')
-
             with Conf('reset bad hosts', meta=MainLoopPlugin, desc='''
                 Periodically clear the scheduler list of unreachable
                 (bad) hosts.
@@ -527,9 +520,9 @@
         .. versionadded:: 8.0.0
     '''):
         Conf('source dirs', VDR.V_STRING_LIST, default=['~/cylc-src'], desc='''
-            A list of paths where ``cylc install <flow_name>`` will look for
-            a workflow of that name. All workflow source directories in these
-            locations will also show up in the GUI, ready for installation.
+            A list of paths for ``cylc install <workflow_name>`` to search for workflow
+            <workflow_name>. All workflow source directories in these locations will
+            also show up in the GUI, ready for installation.
 
            ..
note:: If workflow source directories of the same name exist in more diff --git a/cylc/flow/data_messages.proto b/cylc/flow/data_messages.proto index 9e56ec2131b..37240d7c5af 100644 --- a/cylc/flow/data_messages.proto +++ b/cylc/flow/data_messages.proto @@ -212,8 +212,7 @@ message PbTaskProxy { optional bool is_held = 17; repeated string edges = 18; repeated string ancestors = 19; - optional string flow_label = 20; - optional bool reflow = 21; + optional string flow_nums = 20; optional PbClockTrigger clock_trigger = 22; map external_triggers = 23; map xtriggers = 24; diff --git a/cylc/flow/data_messages_pb2.py b/cylc/flow/data_messages_pb2.py index c410b949dd8..b57bc51745e 100644 --- a/cylc/flow/data_messages_pb2.py +++ b/cylc/flow/data_messages_pb2.py @@ -20,7 +20,7 @@ syntax='proto3', serialized_options=None, create_key=_descriptor._internal_create_key, - serialized_pb=b'\n\x13\x64\x61ta_messages.proto\"\x96\x01\n\x06PbMeta\x12\x12\n\x05title\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x18\n\x0b\x64\x65scription\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x10\n\x03URL\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x19\n\x0cuser_defined\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\x08\n\x06_titleB\x0e\n\x0c_descriptionB\x06\n\x04_URLB\x0f\n\r_user_defined\"\xaa\x01\n\nPbTimeZone\x12\x12\n\x05hours\x18\x01 \x01(\x05H\x00\x88\x01\x01\x12\x14\n\x07minutes\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12\x19\n\x0cstring_basic\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1c\n\x0fstring_extended\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\x08\n\x06_hoursB\n\n\x08_minutesB\x0f\n\r_string_basicB\x12\n\x10_string_extended\"\'\n\x0fPbTaskProxyRefs\x12\x14\n\x0ctask_proxies\x18\x01 \x03(\t\"\xf2\x0b\n\nPbWorkflow\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x13\n\x06status\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x11\n\x04host\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x11\n\x04port\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x12\n\x05owner\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\r\n\x05tasks\x18\x08 \x03(\t\x12\x10\n\x08\x66\x61milies\x18\t \x03(\t\x12\x1c\n\x05\x65\x64ges\x18\n \x01(\x0b\x32\x08.PbEdgesH\x07\x88\x01\x01\x12\x18\n\x0b\x61pi_version\x18\x0b \x01(\x05H\x08\x88\x01\x01\x12\x19\n\x0c\x63ylc_version\x18\x0c \x01(\tH\t\x88\x01\x01\x12\x19\n\x0clast_updated\x18\r \x01(\x01H\n\x88\x01\x01\x12\x1a\n\x04meta\x18\x0e \x01(\x0b\x32\x07.PbMetaH\x0b\x88\x01\x01\x12&\n\x19newest_active_cycle_point\x18\x10 \x01(\tH\x0c\x88\x01\x01\x12&\n\x19oldest_active_cycle_point\x18\x11 \x01(\tH\r\x88\x01\x01\x12\x15\n\x08reloaded\x18\x12 \x01(\x08H\x0e\x88\x01\x01\x12\x15\n\x08run_mode\x18\x13 \x01(\tH\x0f\x88\x01\x01\x12\x19\n\x0c\x63ycling_mode\x18\x14 \x01(\tH\x10\x88\x01\x01\x12\x32\n\x0cstate_totals\x18\x15 \x03(\x0b\x32\x1c.PbWorkflow.StateTotalsEntry\x12\x1d\n\x10workflow_log_dir\x18\x16 \x01(\tH\x11\x88\x01\x01\x12(\n\x0etime_zone_info\x18\x17 \x01(\x0b\x32\x0b.PbTimeZoneH\x12\x88\x01\x01\x12\x17\n\ntree_depth\x18\x18 \x01(\x05H\x13\x88\x01\x01\x12\x15\n\rjob_log_names\x18\x19 \x03(\t\x12\x14\n\x0cns_def_order\x18\x1a \x03(\t\x12\x0e\n\x06states\x18\x1b \x03(\t\x12\x14\n\x0ctask_proxies\x18\x1c \x03(\t\x12\x16\n\x0e\x66\x61mily_proxies\x18\x1d \x03(\t\x12\x17\n\nstatus_msg\x18\x1e \x01(\tH\x14\x88\x01\x01\x12\x1a\n\ris_held_total\x18\x1f \x01(\x05H\x15\x88\x01\x01\x12\x0c\n\x04jobs\x18 \x03(\t\x12\x15\n\x08pub_port\x18! 
\x01(\x05H\x16\x88\x01\x01\x12\x17\n\nbroadcasts\x18\" \x01(\tH\x17\x88\x01\x01\x12\x1c\n\x0fis_queued_total\x18# \x01(\x05H\x18\x88\x01\x01\x12=\n\x12latest_state_tasks\x18$ \x03(\x0b\x32!.PbWorkflow.LatestStateTasksEntry\x12\x13\n\x06pruned\x18% \x01(\x08H\x19\x88\x01\x01\x12\x1e\n\x11is_runahead_total\x18& \x01(\x05H\x1a\x88\x01\x01\x1a\x32\n\x10StateTotalsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1aI\n\x15LatestStateTasksEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1f\n\x05value\x18\x02 \x01(\x0b\x32\x10.PbTaskProxyRefs:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\t\n\x07_statusB\x07\n\x05_hostB\x07\n\x05_portB\x08\n\x06_ownerB\x08\n\x06_edgesB\x0e\n\x0c_api_versionB\x0f\n\r_cylc_versionB\x0f\n\r_last_updatedB\x07\n\x05_metaB\x1c\n\x1a_newest_active_cycle_pointB\x1c\n\x1a_oldest_active_cycle_pointB\x0b\n\t_reloadedB\x0b\n\t_run_modeB\x0f\n\r_cycling_modeB\x13\n\x11_workflow_log_dirB\x11\n\x0f_time_zone_infoB\r\n\x0b_tree_depthB\r\n\x0b_status_msgB\x10\n\x0e_is_held_totalB\x0b\n\t_pub_portB\r\n\x0b_broadcastsB\x12\n\x10_is_queued_totalB\t\n\x07_prunedB\x14\n\x12_is_runahead_total\"\xd5\x08\n\x05PbJob\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x17\n\nsubmit_num\x18\x03 \x01(\x05H\x02\x88\x01\x01\x12\x12\n\x05state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x17\n\ntask_proxy\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x1b\n\x0esubmitted_time\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x19\n\x0cstarted_time\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\x1a\n\rfinished_time\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x13\n\x06job_id\x18\t \x01(\tH\x08\x88\x01\x01\x12\x1c\n\x0fjob_runner_name\x18\n \x01(\tH\t\x88\x01\x01\x12\x17\n\nenv_script\x18\x0b \x01(\tH\n\x88\x01\x01\x12\x17\n\nerr_script\x18\x0c \x01(\tH\x0b\x88\x01\x01\x12\x18\n\x0b\x65xit_script\x18\r \x01(\tH\x0c\x88\x01\x01\x12!\n\x14\x65xecution_time_limit\x18\x0e \x01(\x02H\r\x88\x01\x01\x12\x15\n\x08platform\x18\x0f \x01(\tH\x0e\x88\x01\x01\x12\x18\n\x0binit_script\x18\x10 \x01(\tH\x0f\x88\x01\x01\x12\x18\n\x0bjob_log_dir\x18\x11 \x01(\tH\x10\x88\x01\x01\x12\x18\n\x0bpost_script\x18\x13 \x01(\tH\x11\x88\x01\x01\x12\x17\n\npre_script\x18\x14 \x01(\tH\x12\x88\x01\x01\x12\x13\n\x06script\x18\x15 \x01(\tH\x13\x88\x01\x01\x12\x12\n\x05shell\x18\x16 \x01(\tH\x14\x88\x01\x01\x12\x19\n\x0cwork_sub_dir\x18\x17 \x01(\tH\x15\x88\x01\x01\x12\x18\n\x0b\x65nvironment\x18\x19 \x01(\tH\x16\x88\x01\x01\x12\x17\n\ndirectives\x18\x1a \x01(\tH\x17\x88\x01\x01\x12\x16\n\tparam_var\x18\x1c \x01(\tH\x18\x88\x01\x01\x12\x12\n\nextra_logs\x18\x1d \x03(\t\x12\x11\n\x04name\x18\x1e \x01(\tH\x19\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x1f \x01(\tH\x1a\x88\x01\x01\x12\x10\n\x08messages\x18 \x03(\tB\x08\n\x06_stampB\x05\n\x03_idB\r\n\x0b_submit_numB\x08\n\x06_stateB\r\n\x0b_task_proxyB\x11\n\x0f_submitted_timeB\x0f\n\r_started_timeB\x10\n\x0e_finished_timeB\t\n\x07_job_idB\x12\n\x10_job_runner_nameB\r\n\x0b_env_scriptB\r\n\x0b_err_scriptB\x0e\n\x0c_exit_scriptB\x17\n\x15_execution_time_limitB\x0b\n\t_platformB\x0e\n\x0c_init_scriptB\x0e\n\x0c_job_log_dirB\x0e\n\x0c_post_scriptB\r\n\x0b_pre_scriptB\t\n\x07_scriptB\x08\n\x06_shellB\x0f\n\r_work_sub_dirB\x0e\n\x0c_environmentB\r\n\x0b_directivesB\x0c\n\n_param_varB\x07\n\x05_nameB\x0e\n\x0c_cycle_point\"\xb4\x02\n\x06PbTask\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1a\n\x04meta\x18\x04 
\x01(\x0b\x32\x07.PbMetaH\x03\x88\x01\x01\x12\x1e\n\x11mean_elapsed_time\x18\x05 \x01(\x02H\x04\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x0f\n\x07proxies\x18\x07 \x03(\t\x12\x11\n\tnamespace\x18\x08 \x03(\t\x12\x0f\n\x07parents\x18\t \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\n \x01(\tH\x06\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\x07\n\x05_metaB\x14\n\x12_mean_elapsed_timeB\x08\n\x06_depthB\x0f\n\r_first_parent\"\xd8\x01\n\nPbPollTask\x12\x18\n\x0blocal_proxy\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x15\n\x08workflow\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x19\n\x0cremote_proxy\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\treq_state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x19\n\x0cgraph_string\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x0e\n\x0c_local_proxyB\x0b\n\t_workflowB\x0f\n\r_remote_proxyB\x0c\n\n_req_stateB\x0f\n\r_graph_string\"\xcb\x01\n\x0bPbCondition\x12\x17\n\ntask_proxy\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x17\n\nexpr_alias\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\treq_state\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x14\n\x07message\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\r\n\x0b_task_proxyB\r\n\x0b_expr_aliasB\x0c\n\n_req_stateB\x0c\n\n_satisfiedB\n\n\x08_message\"\x96\x01\n\x0ePbPrerequisite\x12\x17\n\nexpression\x18\x01 \x01(\tH\x00\x88\x01\x01\x12 \n\nconditions\x18\x02 \x03(\x0b\x32\x0c.PbCondition\x12\x14\n\x0c\x63ycle_points\x18\x03 \x03(\t\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x01\x88\x01\x01\x42\r\n\x0b_expressionB\x0c\n\n_satisfied\"\x8c\x01\n\x08PbOutput\x12\x12\n\x05label\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x14\n\x07message\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\tsatisfied\x18\x03 \x01(\x08H\x02\x88\x01\x01\x12\x11\n\x04time\x18\x04 \x01(\x01H\x03\x88\x01\x01\x42\x08\n\x06_labelB\n\n\x08_messageB\x0c\n\n_satisfiedB\x07\n\x05_time\"|\n\x0ePbClockTrigger\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x18\n\x0btime_string\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\tsatisfied\x18\x03 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0e\n\x0c_time_stringB\x0c\n\n_satisfied\"\xa5\x01\n\tPbTrigger\x12\x0f\n\x02id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05label\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x14\n\x07message\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x11\n\x04time\x18\x05 \x01(\x01H\x04\x88\x01\x01\x42\x05\n\x03_idB\x08\n\x06_labelB\n\n\x08_messageB\x0c\n\n_satisfiedB\x07\n\x05_time\"\xf4\x07\n\x0bPbTaskProxy\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04task\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x12\n\x05state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x18\n\x0bjob_submits\x18\x07 \x01(\x05H\x06\x88\x01\x01\x12*\n\x07outputs\x18\t \x03(\x0b\x32\x19.PbTaskProxy.OutputsEntry\x12\x11\n\tnamespace\x18\x0b \x03(\t\x12&\n\rprerequisites\x18\x0c \x03(\x0b\x32\x0f.PbPrerequisite\x12\x0c\n\x04jobs\x18\r \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\x0f \x01(\tH\x07\x88\x01\x01\x12\x11\n\x04name\x18\x10 \x01(\tH\x08\x88\x01\x01\x12\x14\n\x07is_held\x18\x11 \x01(\x08H\t\x88\x01\x01\x12\r\n\x05\x65\x64ges\x18\x12 \x03(\t\x12\x11\n\tancestors\x18\x13 \x03(\t\x12\x17\n\nflow_label\x18\x14 \x01(\tH\n\x88\x01\x01\x12\x13\n\x06reflow\x18\x15 \x01(\x08H\x0b\x88\x01\x01\x12+\n\rclock_trigger\x18\x16 
\x01(\x0b\x32\x0f.PbClockTriggerH\x0c\x88\x01\x01\x12=\n\x11\x65xternal_triggers\x18\x17 \x03(\x0b\x32\".PbTaskProxy.ExternalTriggersEntry\x12.\n\txtriggers\x18\x18 \x03(\x0b\x32\x1b.PbTaskProxy.XtriggersEntry\x12\x16\n\tis_queued\x18\x19 \x01(\x08H\r\x88\x01\x01\x12\x18\n\x0bis_runahead\x18\x1a \x01(\x08H\x0e\x88\x01\x01\x1a\x39\n\x0cOutputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x18\n\x05value\x18\x02 \x01(\x0b\x32\t.PbOutput:\x02\x38\x01\x1a\x43\n\x15\x45xternalTriggersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.PbTrigger:\x02\x38\x01\x1a<\n\x0eXtriggersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.PbTrigger:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_taskB\x08\n\x06_stateB\x0e\n\x0c_cycle_pointB\x08\n\x06_depthB\x0e\n\x0c_job_submitsB\x0f\n\r_first_parentB\x07\n\x05_nameB\n\n\x08_is_heldB\r\n\x0b_flow_labelB\t\n\x07_reflowB\x10\n\x0e_clock_triggerB\x0c\n\n_is_queuedB\x0e\n\x0c_is_runahead\"\x9a\x02\n\x08PbFamily\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1a\n\x04meta\x18\x04 \x01(\x0b\x32\x07.PbMetaH\x03\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x05 \x01(\x05H\x04\x88\x01\x01\x12\x0f\n\x07proxies\x18\x06 \x03(\t\x12\x0f\n\x07parents\x18\x07 \x03(\t\x12\x13\n\x0b\x63hild_tasks\x18\x08 \x03(\t\x12\x16\n\x0e\x63hild_families\x18\t \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\n \x01(\tH\x05\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\x07\n\x05_metaB\x08\n\x06_depthB\x0f\n\r_first_parent\"\xd6\x05\n\rPbFamilyProxy\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x11\n\x04name\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x13\n\x06\x66\x61mily\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x12\n\x05state\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x07 \x01(\x05H\x06\x88\x01\x01\x12\x19\n\x0c\x66irst_parent\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x13\n\x0b\x63hild_tasks\x18\n \x03(\t\x12\x16\n\x0e\x63hild_families\x18\x0b \x03(\t\x12\x14\n\x07is_held\x18\x0c \x01(\x08H\x08\x88\x01\x01\x12\x11\n\tancestors\x18\r \x03(\t\x12\x0e\n\x06states\x18\x0e \x03(\t\x12\x35\n\x0cstate_totals\x18\x0f \x03(\x0b\x32\x1f.PbFamilyProxy.StateTotalsEntry\x12\x1a\n\ris_held_total\x18\x10 \x01(\x05H\t\x88\x01\x01\x12\x16\n\tis_queued\x18\x11 \x01(\x08H\n\x88\x01\x01\x12\x1c\n\x0fis_queued_total\x18\x12 \x01(\x05H\x0b\x88\x01\x01\x12\x18\n\x0bis_runahead\x18\x13 \x01(\x08H\x0c\x88\x01\x01\x12\x1e\n\x11is_runahead_total\x18\x14 \x01(\x05H\r\x88\x01\x01\x1a\x32\n\x10StateTotalsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x0e\n\x0c_cycle_pointB\x07\n\x05_nameB\t\n\x07_familyB\x08\n\x06_stateB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_is_heldB\x10\n\x0e_is_held_totalB\x0c\n\n_is_queuedB\x12\n\x10_is_queued_totalB\x0e\n\x0c_is_runaheadB\x14\n\x12_is_runahead_total\"\xbc\x01\n\x06PbEdge\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x13\n\x06source\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x13\n\x06target\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x14\n\x07suicide\x18\x05 \x01(\x08H\x04\x88\x01\x01\x12\x11\n\x04\x63ond\x18\x06 
\x01(\x08H\x05\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\t\n\x07_sourceB\t\n\x07_targetB\n\n\x08_suicideB\x07\n\x05_cond\"{\n\x07PbEdges\x12\x0f\n\x02id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\r\n\x05\x65\x64ges\x18\x02 \x03(\t\x12+\n\x16workflow_polling_tasks\x18\x03 \x03(\x0b\x32\x0b.PbPollTask\x12\x0e\n\x06leaves\x18\x04 \x03(\t\x12\x0c\n\x04\x66\x65\x65t\x18\x05 \x03(\tB\x05\n\x03_id\"\xf2\x01\n\x10PbEntireWorkflow\x12\"\n\x08workflow\x18\x01 \x01(\x0b\x32\x0b.PbWorkflowH\x00\x88\x01\x01\x12\x16\n\x05tasks\x18\x02 \x03(\x0b\x32\x07.PbTask\x12\"\n\x0ctask_proxies\x18\x03 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x14\n\x04jobs\x18\x04 \x03(\x0b\x32\x06.PbJob\x12\x1b\n\x08\x66\x61milies\x18\x05 \x03(\x0b\x32\t.PbFamily\x12&\n\x0e\x66\x61mily_proxies\x18\x06 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x16\n\x05\x65\x64ges\x18\x07 \x03(\x0b\x32\x07.PbEdgeB\x0b\n\t_workflow\"\xaf\x01\n\x07\x45\x44\x65ltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x16\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x07.PbEdge\x12\x18\n\x07updated\x18\x04 \x03(\x0b\x32\x07.PbEdge\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xb3\x01\n\x07\x46\x44\x65ltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x18\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\t.PbFamily\x12\x1a\n\x07updated\x18\x04 \x03(\x0b\x32\t.PbFamily\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xbe\x01\n\x08\x46PDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1d\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x1f\n\x07updated\x18\x04 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xad\x01\n\x07JDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x15\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x06.PbJob\x12\x17\n\x07updated\x18\x04 \x03(\x0b\x32\x06.PbJob\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xaf\x01\n\x07TDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x16\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x07.PbTask\x12\x18\n\x07updated\x18\x04 \x03(\x0b\x32\x07.PbTask\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xba\x01\n\x08TPDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1b\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x1d\n\x07updated\x18\x04 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xc3\x01\n\x07WDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x1f\n\x05\x61\x64\x64\x65\x64\x18\x02 \x01(\x0b\x32\x0b.PbWorkflowH\x01\x88\x01\x01\x12!\n\x07updated\x18\x03 
\x01(\x0b\x32\x0b.PbWorkflowH\x02\x88\x01\x01\x12\x15\n\x08reloaded\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x13\n\x06pruned\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x07\n\x05_timeB\x08\n\x06_addedB\n\n\x08_updatedB\x0b\n\t_reloadedB\t\n\x07_pruned\"\xd1\x01\n\tAllDeltas\x12\x1a\n\x08\x66\x61milies\x18\x01 \x01(\x0b\x32\x08.FDeltas\x12!\n\x0e\x66\x61mily_proxies\x18\x02 \x01(\x0b\x32\t.FPDeltas\x12\x16\n\x04jobs\x18\x03 \x01(\x0b\x32\x08.JDeltas\x12\x17\n\x05tasks\x18\x04 \x01(\x0b\x32\x08.TDeltas\x12\x1f\n\x0ctask_proxies\x18\x05 \x01(\x0b\x32\t.TPDeltas\x12\x17\n\x05\x65\x64ges\x18\x06 \x01(\x0b\x32\x08.EDeltas\x12\x1a\n\x08workflow\x18\x07 \x01(\x0b\x32\x08.WDeltasb\x06proto3' + serialized_pb=b'\n\x13\x64\x61ta_messages.proto\"\x96\x01\n\x06PbMeta\x12\x12\n\x05title\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x18\n\x0b\x64\x65scription\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x10\n\x03URL\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x19\n\x0cuser_defined\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\x08\n\x06_titleB\x0e\n\x0c_descriptionB\x06\n\x04_URLB\x0f\n\r_user_defined\"\xaa\x01\n\nPbTimeZone\x12\x12\n\x05hours\x18\x01 \x01(\x05H\x00\x88\x01\x01\x12\x14\n\x07minutes\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12\x19\n\x0cstring_basic\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1c\n\x0fstring_extended\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\x08\n\x06_hoursB\n\n\x08_minutesB\x0f\n\r_string_basicB\x12\n\x10_string_extended\"\'\n\x0fPbTaskProxyRefs\x12\x14\n\x0ctask_proxies\x18\x01 \x03(\t\"\xf2\x0b\n\nPbWorkflow\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x13\n\x06status\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x11\n\x04host\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x11\n\x04port\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x12\n\x05owner\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\r\n\x05tasks\x18\x08 \x03(\t\x12\x10\n\x08\x66\x61milies\x18\t \x03(\t\x12\x1c\n\x05\x65\x64ges\x18\n \x01(\x0b\x32\x08.PbEdgesH\x07\x88\x01\x01\x12\x18\n\x0b\x61pi_version\x18\x0b \x01(\x05H\x08\x88\x01\x01\x12\x19\n\x0c\x63ylc_version\x18\x0c \x01(\tH\t\x88\x01\x01\x12\x19\n\x0clast_updated\x18\r \x01(\x01H\n\x88\x01\x01\x12\x1a\n\x04meta\x18\x0e \x01(\x0b\x32\x07.PbMetaH\x0b\x88\x01\x01\x12&\n\x19newest_active_cycle_point\x18\x10 \x01(\tH\x0c\x88\x01\x01\x12&\n\x19oldest_active_cycle_point\x18\x11 \x01(\tH\r\x88\x01\x01\x12\x15\n\x08reloaded\x18\x12 \x01(\x08H\x0e\x88\x01\x01\x12\x15\n\x08run_mode\x18\x13 \x01(\tH\x0f\x88\x01\x01\x12\x19\n\x0c\x63ycling_mode\x18\x14 \x01(\tH\x10\x88\x01\x01\x12\x32\n\x0cstate_totals\x18\x15 \x03(\x0b\x32\x1c.PbWorkflow.StateTotalsEntry\x12\x1d\n\x10workflow_log_dir\x18\x16 \x01(\tH\x11\x88\x01\x01\x12(\n\x0etime_zone_info\x18\x17 \x01(\x0b\x32\x0b.PbTimeZoneH\x12\x88\x01\x01\x12\x17\n\ntree_depth\x18\x18 \x01(\x05H\x13\x88\x01\x01\x12\x15\n\rjob_log_names\x18\x19 \x03(\t\x12\x14\n\x0cns_def_order\x18\x1a \x03(\t\x12\x0e\n\x06states\x18\x1b \x03(\t\x12\x14\n\x0ctask_proxies\x18\x1c \x03(\t\x12\x16\n\x0e\x66\x61mily_proxies\x18\x1d \x03(\t\x12\x17\n\nstatus_msg\x18\x1e \x01(\tH\x14\x88\x01\x01\x12\x1a\n\ris_held_total\x18\x1f \x01(\x05H\x15\x88\x01\x01\x12\x0c\n\x04jobs\x18 \x03(\t\x12\x15\n\x08pub_port\x18! 
\x01(\x05H\x16\x88\x01\x01\x12\x17\n\nbroadcasts\x18\" \x01(\tH\x17\x88\x01\x01\x12\x1c\n\x0fis_queued_total\x18# \x01(\x05H\x18\x88\x01\x01\x12=\n\x12latest_state_tasks\x18$ \x03(\x0b\x32!.PbWorkflow.LatestStateTasksEntry\x12\x13\n\x06pruned\x18% \x01(\x08H\x19\x88\x01\x01\x12\x1e\n\x11is_runahead_total\x18& \x01(\x05H\x1a\x88\x01\x01\x1a\x32\n\x10StateTotalsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1aI\n\x15LatestStateTasksEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1f\n\x05value\x18\x02 \x01(\x0b\x32\x10.PbTaskProxyRefs:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\t\n\x07_statusB\x07\n\x05_hostB\x07\n\x05_portB\x08\n\x06_ownerB\x08\n\x06_edgesB\x0e\n\x0c_api_versionB\x0f\n\r_cylc_versionB\x0f\n\r_last_updatedB\x07\n\x05_metaB\x1c\n\x1a_newest_active_cycle_pointB\x1c\n\x1a_oldest_active_cycle_pointB\x0b\n\t_reloadedB\x0b\n\t_run_modeB\x0f\n\r_cycling_modeB\x13\n\x11_workflow_log_dirB\x11\n\x0f_time_zone_infoB\r\n\x0b_tree_depthB\r\n\x0b_status_msgB\x10\n\x0e_is_held_totalB\x0b\n\t_pub_portB\r\n\x0b_broadcastsB\x12\n\x10_is_queued_totalB\t\n\x07_prunedB\x14\n\x12_is_runahead_total\"\xd5\x08\n\x05PbJob\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x17\n\nsubmit_num\x18\x03 \x01(\x05H\x02\x88\x01\x01\x12\x12\n\x05state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x17\n\ntask_proxy\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x1b\n\x0esubmitted_time\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x19\n\x0cstarted_time\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\x1a\n\rfinished_time\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x13\n\x06job_id\x18\t \x01(\tH\x08\x88\x01\x01\x12\x1c\n\x0fjob_runner_name\x18\n \x01(\tH\t\x88\x01\x01\x12\x17\n\nenv_script\x18\x0b \x01(\tH\n\x88\x01\x01\x12\x17\n\nerr_script\x18\x0c \x01(\tH\x0b\x88\x01\x01\x12\x18\n\x0b\x65xit_script\x18\r \x01(\tH\x0c\x88\x01\x01\x12!\n\x14\x65xecution_time_limit\x18\x0e \x01(\x02H\r\x88\x01\x01\x12\x15\n\x08platform\x18\x0f \x01(\tH\x0e\x88\x01\x01\x12\x18\n\x0binit_script\x18\x10 \x01(\tH\x0f\x88\x01\x01\x12\x18\n\x0bjob_log_dir\x18\x11 \x01(\tH\x10\x88\x01\x01\x12\x18\n\x0bpost_script\x18\x13 \x01(\tH\x11\x88\x01\x01\x12\x17\n\npre_script\x18\x14 \x01(\tH\x12\x88\x01\x01\x12\x13\n\x06script\x18\x15 \x01(\tH\x13\x88\x01\x01\x12\x12\n\x05shell\x18\x16 \x01(\tH\x14\x88\x01\x01\x12\x19\n\x0cwork_sub_dir\x18\x17 \x01(\tH\x15\x88\x01\x01\x12\x18\n\x0b\x65nvironment\x18\x19 \x01(\tH\x16\x88\x01\x01\x12\x17\n\ndirectives\x18\x1a \x01(\tH\x17\x88\x01\x01\x12\x16\n\tparam_var\x18\x1c \x01(\tH\x18\x88\x01\x01\x12\x12\n\nextra_logs\x18\x1d \x03(\t\x12\x11\n\x04name\x18\x1e \x01(\tH\x19\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x1f \x01(\tH\x1a\x88\x01\x01\x12\x10\n\x08messages\x18 \x03(\tB\x08\n\x06_stampB\x05\n\x03_idB\r\n\x0b_submit_numB\x08\n\x06_stateB\r\n\x0b_task_proxyB\x11\n\x0f_submitted_timeB\x0f\n\r_started_timeB\x10\n\x0e_finished_timeB\t\n\x07_job_idB\x12\n\x10_job_runner_nameB\r\n\x0b_env_scriptB\r\n\x0b_err_scriptB\x0e\n\x0c_exit_scriptB\x17\n\x15_execution_time_limitB\x0b\n\t_platformB\x0e\n\x0c_init_scriptB\x0e\n\x0c_job_log_dirB\x0e\n\x0c_post_scriptB\r\n\x0b_pre_scriptB\t\n\x07_scriptB\x08\n\x06_shellB\x0f\n\r_work_sub_dirB\x0e\n\x0c_environmentB\r\n\x0b_directivesB\x0c\n\n_param_varB\x07\n\x05_nameB\x0e\n\x0c_cycle_point\"\xb4\x02\n\x06PbTask\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1a\n\x04meta\x18\x04 
\x01(\x0b\x32\x07.PbMetaH\x03\x88\x01\x01\x12\x1e\n\x11mean_elapsed_time\x18\x05 \x01(\x02H\x04\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x0f\n\x07proxies\x18\x07 \x03(\t\x12\x11\n\tnamespace\x18\x08 \x03(\t\x12\x0f\n\x07parents\x18\t \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\n \x01(\tH\x06\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\x07\n\x05_metaB\x14\n\x12_mean_elapsed_timeB\x08\n\x06_depthB\x0f\n\r_first_parent\"\xd8\x01\n\nPbPollTask\x12\x18\n\x0blocal_proxy\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x15\n\x08workflow\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x19\n\x0cremote_proxy\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\treq_state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x19\n\x0cgraph_string\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x0e\n\x0c_local_proxyB\x0b\n\t_workflowB\x0f\n\r_remote_proxyB\x0c\n\n_req_stateB\x0f\n\r_graph_string\"\xcb\x01\n\x0bPbCondition\x12\x17\n\ntask_proxy\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x17\n\nexpr_alias\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\treq_state\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x14\n\x07message\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\r\n\x0b_task_proxyB\r\n\x0b_expr_aliasB\x0c\n\n_req_stateB\x0c\n\n_satisfiedB\n\n\x08_message\"\x96\x01\n\x0ePbPrerequisite\x12\x17\n\nexpression\x18\x01 \x01(\tH\x00\x88\x01\x01\x12 \n\nconditions\x18\x02 \x03(\x0b\x32\x0c.PbCondition\x12\x14\n\x0c\x63ycle_points\x18\x03 \x03(\t\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x01\x88\x01\x01\x42\r\n\x0b_expressionB\x0c\n\n_satisfied\"\x8c\x01\n\x08PbOutput\x12\x12\n\x05label\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x14\n\x07message\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\tsatisfied\x18\x03 \x01(\x08H\x02\x88\x01\x01\x12\x11\n\x04time\x18\x04 \x01(\x01H\x03\x88\x01\x01\x42\x08\n\x06_labelB\n\n\x08_messageB\x0c\n\n_satisfiedB\x07\n\x05_time\"|\n\x0ePbClockTrigger\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x18\n\x0btime_string\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\tsatisfied\x18\x03 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0e\n\x0c_time_stringB\x0c\n\n_satisfied\"\xa5\x01\n\tPbTrigger\x12\x0f\n\x02id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05label\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x14\n\x07message\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x11\n\x04time\x18\x05 \x01(\x01H\x04\x88\x01\x01\x42\x05\n\x03_idB\x08\n\x06_labelB\n\n\x08_messageB\x0c\n\n_satisfiedB\x07\n\x05_time\"\xd2\x07\n\x0bPbTaskProxy\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04task\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x12\n\x05state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x18\n\x0bjob_submits\x18\x07 \x01(\x05H\x06\x88\x01\x01\x12*\n\x07outputs\x18\t \x03(\x0b\x32\x19.PbTaskProxy.OutputsEntry\x12\x11\n\tnamespace\x18\x0b \x03(\t\x12&\n\rprerequisites\x18\x0c \x03(\x0b\x32\x0f.PbPrerequisite\x12\x0c\n\x04jobs\x18\r \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\x0f \x01(\tH\x07\x88\x01\x01\x12\x11\n\x04name\x18\x10 \x01(\tH\x08\x88\x01\x01\x12\x14\n\x07is_held\x18\x11 \x01(\x08H\t\x88\x01\x01\x12\r\n\x05\x65\x64ges\x18\x12 \x03(\t\x12\x11\n\tancestors\x18\x13 \x03(\t\x12\x16\n\tflow_nums\x18\x14 \x01(\tH\n\x88\x01\x01\x12+\n\rclock_trigger\x18\x16 \x01(\x0b\x32\x0f.PbClockTriggerH\x0b\x88\x01\x01\x12=\n\x11\x65xternal_triggers\x18\x17 
\x03(\x0b\x32\".PbTaskProxy.ExternalTriggersEntry\x12.\n\txtriggers\x18\x18 \x03(\x0b\x32\x1b.PbTaskProxy.XtriggersEntry\x12\x16\n\tis_queued\x18\x19 \x01(\x08H\x0c\x88\x01\x01\x12\x18\n\x0bis_runahead\x18\x1a \x01(\x08H\r\x88\x01\x01\x1a\x39\n\x0cOutputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x18\n\x05value\x18\x02 \x01(\x0b\x32\t.PbOutput:\x02\x38\x01\x1a\x43\n\x15\x45xternalTriggersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.PbTrigger:\x02\x38\x01\x1a<\n\x0eXtriggersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.PbTrigger:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_taskB\x08\n\x06_stateB\x0e\n\x0c_cycle_pointB\x08\n\x06_depthB\x0e\n\x0c_job_submitsB\x0f\n\r_first_parentB\x07\n\x05_nameB\n\n\x08_is_heldB\x0c\n\n_flow_numsB\x10\n\x0e_clock_triggerB\x0c\n\n_is_queuedB\x0e\n\x0c_is_runahead\"\x9a\x02\n\x08PbFamily\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1a\n\x04meta\x18\x04 \x01(\x0b\x32\x07.PbMetaH\x03\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x05 \x01(\x05H\x04\x88\x01\x01\x12\x0f\n\x07proxies\x18\x06 \x03(\t\x12\x0f\n\x07parents\x18\x07 \x03(\t\x12\x13\n\x0b\x63hild_tasks\x18\x08 \x03(\t\x12\x16\n\x0e\x63hild_families\x18\t \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\n \x01(\tH\x05\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\x07\n\x05_metaB\x08\n\x06_depthB\x0f\n\r_first_parent\"\xd6\x05\n\rPbFamilyProxy\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x11\n\x04name\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x13\n\x06\x66\x61mily\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x12\n\x05state\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x07 \x01(\x05H\x06\x88\x01\x01\x12\x19\n\x0c\x66irst_parent\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x13\n\x0b\x63hild_tasks\x18\n \x03(\t\x12\x16\n\x0e\x63hild_families\x18\x0b \x03(\t\x12\x14\n\x07is_held\x18\x0c \x01(\x08H\x08\x88\x01\x01\x12\x11\n\tancestors\x18\r \x03(\t\x12\x0e\n\x06states\x18\x0e \x03(\t\x12\x35\n\x0cstate_totals\x18\x0f \x03(\x0b\x32\x1f.PbFamilyProxy.StateTotalsEntry\x12\x1a\n\ris_held_total\x18\x10 \x01(\x05H\t\x88\x01\x01\x12\x16\n\tis_queued\x18\x11 \x01(\x08H\n\x88\x01\x01\x12\x1c\n\x0fis_queued_total\x18\x12 \x01(\x05H\x0b\x88\x01\x01\x12\x18\n\x0bis_runahead\x18\x13 \x01(\x08H\x0c\x88\x01\x01\x12\x1e\n\x11is_runahead_total\x18\x14 \x01(\x05H\r\x88\x01\x01\x1a\x32\n\x10StateTotalsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x0e\n\x0c_cycle_pointB\x07\n\x05_nameB\t\n\x07_familyB\x08\n\x06_stateB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_is_heldB\x10\n\x0e_is_held_totalB\x0c\n\n_is_queuedB\x12\n\x10_is_queued_totalB\x0e\n\x0c_is_runaheadB\x14\n\x12_is_runahead_total\"\xbc\x01\n\x06PbEdge\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x13\n\x06source\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x13\n\x06target\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x14\n\x07suicide\x18\x05 \x01(\x08H\x04\x88\x01\x01\x12\x11\n\x04\x63ond\x18\x06 \x01(\x08H\x05\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\t\n\x07_sourceB\t\n\x07_targetB\n\n\x08_suicideB\x07\n\x05_cond\"{\n\x07PbEdges\x12\x0f\n\x02id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\r\n\x05\x65\x64ges\x18\x02 
\x03(\t\x12+\n\x16workflow_polling_tasks\x18\x03 \x03(\x0b\x32\x0b.PbPollTask\x12\x0e\n\x06leaves\x18\x04 \x03(\t\x12\x0c\n\x04\x66\x65\x65t\x18\x05 \x03(\tB\x05\n\x03_id\"\xf2\x01\n\x10PbEntireWorkflow\x12\"\n\x08workflow\x18\x01 \x01(\x0b\x32\x0b.PbWorkflowH\x00\x88\x01\x01\x12\x16\n\x05tasks\x18\x02 \x03(\x0b\x32\x07.PbTask\x12\"\n\x0ctask_proxies\x18\x03 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x14\n\x04jobs\x18\x04 \x03(\x0b\x32\x06.PbJob\x12\x1b\n\x08\x66\x61milies\x18\x05 \x03(\x0b\x32\t.PbFamily\x12&\n\x0e\x66\x61mily_proxies\x18\x06 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x16\n\x05\x65\x64ges\x18\x07 \x03(\x0b\x32\x07.PbEdgeB\x0b\n\t_workflow\"\xaf\x01\n\x07\x45\x44\x65ltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x16\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x07.PbEdge\x12\x18\n\x07updated\x18\x04 \x03(\x0b\x32\x07.PbEdge\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xb3\x01\n\x07\x46\x44\x65ltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x18\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\t.PbFamily\x12\x1a\n\x07updated\x18\x04 \x03(\x0b\x32\t.PbFamily\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xbe\x01\n\x08\x46PDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1d\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x1f\n\x07updated\x18\x04 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xad\x01\n\x07JDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x15\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x06.PbJob\x12\x17\n\x07updated\x18\x04 \x03(\x0b\x32\x06.PbJob\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xaf\x01\n\x07TDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x16\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x07.PbTask\x12\x18\n\x07updated\x18\x04 \x03(\x0b\x32\x07.PbTask\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xba\x01\n\x08TPDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1b\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x1d\n\x07updated\x18\x04 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xc3\x01\n\x07WDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x1f\n\x05\x61\x64\x64\x65\x64\x18\x02 \x01(\x0b\x32\x0b.PbWorkflowH\x01\x88\x01\x01\x12!\n\x07updated\x18\x03 \x01(\x0b\x32\x0b.PbWorkflowH\x02\x88\x01\x01\x12\x15\n\x08reloaded\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x13\n\x06pruned\x18\x05 
\x01(\tH\x04\x88\x01\x01\x42\x07\n\x05_timeB\x08\n\x06_addedB\n\n\x08_updatedB\x0b\n\t_reloadedB\t\n\x07_pruned\"\xd1\x01\n\tAllDeltas\x12\x1a\n\x08\x66\x61milies\x18\x01 \x01(\x0b\x32\x08.FDeltas\x12!\n\x0e\x66\x61mily_proxies\x18\x02 \x01(\x0b\x32\t.FPDeltas\x12\x16\n\x04jobs\x18\x03 \x01(\x0b\x32\x08.JDeltas\x12\x17\n\x05tasks\x18\x04 \x01(\x0b\x32\x08.TDeltas\x12\x1f\n\x0ctask_proxies\x18\x05 \x01(\x0b\x32\t.TPDeltas\x12\x17\n\x05\x65\x64ges\x18\x06 \x01(\x0b\x32\x08.EDeltas\x12\x1a\n\x08workflow\x18\x07 \x01(\x0b\x32\x08.WDeltasb\x06proto3' ) @@ -1678,8 +1678,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=4988, - serialized_end=5045, + serialized_start=4966, + serialized_end=5023, ) _PBTASKPROXY_EXTERNALTRIGGERSENTRY = _descriptor.Descriptor( @@ -1716,8 +1716,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=5047, - serialized_end=5114, + serialized_start=5025, + serialized_end=5092, ) _PBTASKPROXY_XTRIGGERSENTRY = _descriptor.Descriptor( @@ -1754,8 +1754,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=5116, - serialized_end=5176, + serialized_start=5094, + serialized_end=5154, ) _PBTASKPROXY = _descriptor.Descriptor( @@ -1879,49 +1879,42 @@ is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='flow_label', full_name='PbTaskProxy.flow_label', index=16, + name='flow_nums', full_name='PbTaskProxy.flow_nums', index=16, number=20, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='reflow', full_name='PbTaskProxy.reflow', index=17, - number=21, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='clock_trigger', full_name='PbTaskProxy.clock_trigger', index=18, + name='clock_trigger', full_name='PbTaskProxy.clock_trigger', index=17, number=22, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='external_triggers', full_name='PbTaskProxy.external_triggers', index=19, + name='external_triggers', full_name='PbTaskProxy.external_triggers', index=18, number=23, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='xtriggers', full_name='PbTaskProxy.xtriggers', index=20, + name='xtriggers', full_name='PbTaskProxy.xtriggers', index=19, number=24, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='is_queued', 
full_name='PbTaskProxy.is_queued', index=21, + name='is_queued', full_name='PbTaskProxy.is_queued', index=20, number=25, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='is_runahead', full_name='PbTaskProxy.is_runahead', index=22, + name='is_runahead', full_name='PbTaskProxy.is_runahead', index=21, number=26, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, @@ -1989,33 +1982,28 @@ create_key=_descriptor._internal_create_key, fields=[]), _descriptor.OneofDescriptor( - name='_flow_label', full_name='PbTaskProxy._flow_label', + name='_flow_nums', full_name='PbTaskProxy._flow_nums', index=10, containing_type=None, create_key=_descriptor._internal_create_key, fields=[]), - _descriptor.OneofDescriptor( - name='_reflow', full_name='PbTaskProxy._reflow', - index=11, containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[]), _descriptor.OneofDescriptor( name='_clock_trigger', full_name='PbTaskProxy._clock_trigger', - index=12, containing_type=None, + index=11, containing_type=None, create_key=_descriptor._internal_create_key, fields=[]), _descriptor.OneofDescriptor( name='_is_queued', full_name='PbTaskProxy._is_queued', - index=13, containing_type=None, + index=12, containing_type=None, create_key=_descriptor._internal_create_key, fields=[]), _descriptor.OneofDescriptor( name='_is_runahead', full_name='PbTaskProxy._is_runahead', - index=14, containing_type=None, + index=13, containing_type=None, create_key=_descriptor._internal_create_key, fields=[]), ], serialized_start=4354, - serialized_end=5366, + serialized_end=5332, ) @@ -2139,8 +2127,8 @@ create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=5369, - serialized_end=5651, + serialized_start=5335, + serialized_end=5617, ) @@ -2405,8 +2393,8 @@ create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=5654, - serialized_end=6380, + serialized_start=5620, + serialized_end=6346, ) @@ -2502,8 +2490,8 @@ create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=6383, - serialized_end=6571, + serialized_start=6349, + serialized_end=6537, ) @@ -2567,8 +2555,8 @@ create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=6573, - serialized_end=6696, + serialized_start=6539, + serialized_end=6662, ) @@ -2646,8 +2634,8 @@ create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=6699, - serialized_end=6941, + serialized_start=6665, + serialized_end=6907, ) @@ -2728,8 +2716,8 @@ create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=6944, - serialized_end=7119, + serialized_start=6910, + serialized_end=7085, ) @@ -2810,8 +2798,8 @@ create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=7122, - serialized_end=7301, + serialized_start=7088, + serialized_end=7267, ) @@ -2892,8 +2880,8 @@ create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=7304, - serialized_end=7494, + serialized_start=7270, + serialized_end=7460, ) @@ -2974,8 +2962,8 @@ create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=7497, - serialized_end=7670, + serialized_start=7463, + serialized_end=7636, ) @@ -3056,8 +3044,8 @@ 
create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=7673, - serialized_end=7848, + serialized_start=7639, + serialized_end=7814, ) @@ -3138,8 +3126,8 @@ create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=7851, - serialized_end=8037, + serialized_start=7817, + serialized_end=8003, ) @@ -3223,8 +3211,8 @@ create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=8040, - serialized_end=8235, + serialized_start=8006, + serialized_end=8201, ) @@ -3297,8 +3285,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=8238, - serialized_end=8447, + serialized_start=8204, + serialized_end=8413, ) _PBMETA.oneofs_by_name['_title'].fields.append( @@ -3631,12 +3619,9 @@ _PBTASKPROXY.oneofs_by_name['_is_held'].fields.append( _PBTASKPROXY.fields_by_name['is_held']) _PBTASKPROXY.fields_by_name['is_held'].containing_oneof = _PBTASKPROXY.oneofs_by_name['_is_held'] -_PBTASKPROXY.oneofs_by_name['_flow_label'].fields.append( - _PBTASKPROXY.fields_by_name['flow_label']) -_PBTASKPROXY.fields_by_name['flow_label'].containing_oneof = _PBTASKPROXY.oneofs_by_name['_flow_label'] -_PBTASKPROXY.oneofs_by_name['_reflow'].fields.append( - _PBTASKPROXY.fields_by_name['reflow']) -_PBTASKPROXY.fields_by_name['reflow'].containing_oneof = _PBTASKPROXY.oneofs_by_name['_reflow'] +_PBTASKPROXY.oneofs_by_name['_flow_nums'].fields.append( + _PBTASKPROXY.fields_by_name['flow_nums']) +_PBTASKPROXY.fields_by_name['flow_nums'].containing_oneof = _PBTASKPROXY.oneofs_by_name['_flow_nums'] _PBTASKPROXY.oneofs_by_name['_clock_trigger'].fields.append( _PBTASKPROXY.fields_by_name['clock_trigger']) _PBTASKPROXY.fields_by_name['clock_trigger'].containing_oneof = _PBTASKPROXY.oneofs_by_name['_clock_trigger'] diff --git a/cylc/flow/data_store_mgr.py b/cylc/flow/data_store_mgr.py index 307cdbdead3..28700c7b9d4 100644 --- a/cylc/flow/data_store_mgr.py +++ b/cylc/flow/data_store_mgr.py @@ -627,8 +627,6 @@ def increment_graph_window( Task name. point (cylc.flow.cycling.PointBase): PointBase derived object. - flow_label (str): - Flow label used to distinguish multiple runs. edge_distance (int): Graph distance from active/origin node. 
            active_id (str):
@@ -690,13 +688,15 @@ def increment_graph_window(
         if edge_distance == 1:
             descendant = True
         self._expand_graph_window(
-            s_id, s_node, items, active_id, itask.flow_label,
-            itask.reflow, edge_distance, descendant, False)
+            s_id, s_node, items, active_id, itask.flow_nums,
+            edge_distance, descendant, False)
+
         for items in generate_graph_parents(
-                itask.tdef, itask.point).values():
+            itask.tdef, itask.point
+        ).values():
             self._expand_graph_window(
-                s_id, s_node, items, active_id, itask.flow_label,
-                itask.reflow, edge_distance, False, True)
+                s_id, s_node, items, active_id, itask.flow_nums,
+                edge_distance, False, True)
 
         if edge_distance == 1:
             levels = self.n_window_boundary_nodes[active_id].keys()
@@ -714,7 +714,7 @@ def increment_graph_window(
             self.n_window_edges[active_id])
 
     def _expand_graph_window(
-            self, s_id, s_node, items, active_id, flow_label, reflow,
+            self, s_id, s_node, items, active_id, flow_nums,
             edge_distance, descendant=False, is_parent=False):
         """Construct nodes/edges for children/parents of source node."""
         final_point = self.schd.config.final_point
@@ -756,8 +756,8 @@ def _expand_graph_window(
             self.increment_graph_window(
                 TaskProxy(
                     self.schd.config.get_taskdef(t_name),
-                    t_point, flow_label,
-                    submit_num=0, reflow=reflow),
+                    t_point, flow_nums, submit_num=0
+                ),
                 edge_distance, active_id, descendant, is_parent)
 
     def remove_pool_node(self, name, point):
@@ -828,14 +828,13 @@ def generate_ghost_task(self, tp_id, itask, is_parent=False):
             depth=task_def.depth,
             name=name,
             state=TASK_STATUS_WAITING,
-            flow_label=itask.flow_label
+            flow_nums=json.dumps(list(itask.flow_nums))
         )
         if is_parent and tp_id not in self.n_window_nodes:
             # TODO: Load task info from DB, including itask prerequisites
             tproxy.state = TASK_STATUS_EXPIRED
         else:
             tproxy.state = TASK_STATUS_WAITING
-        tproxy.reflow = itask.reflow
 
         tproxy.namespace[:] = task_def.namespace
         if is_orphan:
diff --git a/cylc/flow/flow_mgr.py b/cylc/flow/flow_mgr.py
new file mode 100644
index 00000000000..e16e53f228c
--- /dev/null
+++ b/cylc/flow/flow_mgr.py
@@ -0,0 +1,72 @@
+# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
+# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+ +"""Manage flow counter and flow metadata.""" + +from typing import Dict, Set, TYPE_CHECKING +from datetime import datetime, timedelta + +from cylc.flow import LOG + +if TYPE_CHECKING: + from cylc.flow.workflow_db_mgr import WorkflowDatabaseManager + + +class FlowMgr: + """Logic to manage flow counter and flow metadata.""" + + def __init__(self, db_mgr: "WorkflowDatabaseManager") -> None: + """Initialise the flow manager.""" + self.db_mgr = db_mgr + self.counter = 0 + self.flows: Dict[int, Dict[str, str]] = {} + + def get_new_flow(self, description: str = "no description") -> int: + """Increment flow counter, record flow metadata.""" + self.counter += 1 + # record start time to nearest second + now = datetime.now() + now_sec: str = str(now - timedelta(microseconds=now.microsecond)) + self.flows[self.counter] = { + "description": description, + "start_time": now_sec + } + LOG.info( + f"New flow: {self.counter} " + f"({description}) " + f"{now_sec}" + ) + self.db_mgr.put_insert_workflow_flows( + self.counter, + self.flows[self.counter] + ) + self.db_mgr.put_workflow_params_1("flow_counter", self.counter) + self.dump_to_log() + return self.counter + + def load_flows_db(self, flow_nums: Set[int]) -> None: + """Load metadata for selected flows from DB - on restart.""" + self.flows = self.db_mgr.pri_dao.select_workflow_flows(flow_nums) + self.dump_to_log() + + def dump_to_log(self) -> None: + """Dump current flow info to log.""" + for f in self.flows: + LOG.info( + f"flow: {f}: " + f"({self.flows[f]['description']}) " + f"{self.flows[f]['start_time']} " + ) diff --git a/cylc/flow/job_file.py b/cylc/flow/job_file.py index 1de5e5d23fc..9d3e588b057 100644 --- a/cylc/flow/job_file.py +++ b/cylc/flow/job_file.py @@ -226,7 +226,9 @@ def _write_task_environment(self, handle, job_conf): handle.write( '\n export CYLC_TASK_TRY_NUMBER=%s' % job_conf['try_num']) handle.write( - '\n export CYLC_TASK_FLOW_LABEL=%s' % job_conf['flow_label']) + "\n export CYLC_TASK_FLOWS=" + f"{','.join(str(f) for f in job_conf['flow_nums'])}" + ) # Standard parameter environment variables for var, val in job_conf['param_var'].items(): handle.write('\n export CYLC_TASK_PARAM_%s="%s"' % (var, val)) diff --git a/cylc/flow/main_loop/prune_flow_labels.py b/cylc/flow/main_loop/prune_flow_labels.py deleted file mode 100644 index 7e5bf87ed8c..00000000000 --- a/cylc/flow/main_loop/prune_flow_labels.py +++ /dev/null @@ -1,24 +0,0 @@ -# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. -# Copyright (C) NIWA & British Crown (Met Office) & Contributors. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
-"""Prune excess common flow labels.""" - -from cylc.flow.main_loop import periodic - - -@periodic -async def prune_flow_labels(scheduler, _): - """Prune flow labels.""" - scheduler.pool.prune_flow_labels() diff --git a/cylc/flow/network/resolvers.py b/cylc/flow/network/resolvers.py index 4b96cf81eb6..ab237f30194 100644 --- a/cylc/flow/network/resolvers.py +++ b/cylc/flow/network/resolvers.py @@ -712,7 +712,7 @@ def set_graph_window_extent(self, n_edge_distance): else: return (False, 'Edge distance cannot be negative') - def force_spawn_children(self, tasks, outputs): + def force_spawn_children(self, tasks, outputs, flow_num): """Spawn children of given task outputs. Args: @@ -729,8 +729,15 @@ def force_spawn_children(self, tasks, outputs): """ self.schd.command_queue.put( - ("force_spawn_children", (tasks,), - {'outputs': outputs})) + ( + "force_spawn_children", + (tasks,), + { + "outputs": outputs, + "flow_num": flow_num + } + ) + ) return (True, 'Command queued') def stop( @@ -739,7 +746,7 @@ def stop( cycle_point: Optional[str] = None, clock_time: Optional[str] = None, task: Optional[str] = None, - flow_label: Optional[str] = None + flow_num: Optional[int] = None ) -> Tuple[bool, str]: """Stop the workflow or specific flow from spawning any further. @@ -748,7 +755,8 @@ def stop( cycle_point: Cycle point after which to stop. clock_time: Wallclock time after which to stop. task: Stop after this task succeeds. - flow_label: The flow to sterilise. + flow_num: The flow to stop. + ): Returns: outcome: True if command successfully queued. @@ -763,19 +771,21 @@ def stop( 'cycle_point': cycle_point, 'clock_time': clock_time, 'task': task, - 'flow_label': flow_label, + 'flow_num': flow_num, }) )) return (True, 'Command queued') - def force_trigger_tasks(self, tasks, reflow=False): + def force_trigger_tasks(self, tasks, reflow, flow_descr): """Trigger submission of task jobs where possible. Args: tasks (list): List of identifiers, see `task globs`_ - reflow (bool, optional): - Start new flow(s) from triggered tasks. + reflow (bool): + Start new flow from triggered tasks. + flow_descr (str): + Description of new flow. Returns: tuple: (outcome, message) @@ -787,6 +797,12 @@ def force_trigger_tasks(self, tasks, reflow=False): """ self.schd.command_queue.put( - ("force_trigger_tasks", (tasks,), - {"reflow": reflow})) + ( + "force_trigger_tasks", (tasks,), + { + "reflow": reflow, + "flow_descr": flow_descr + } + ) + ) return (True, 'Command queued') diff --git a/cylc/flow/network/schema.py b/cylc/flow/network/schema.py index 2811242fc63..d10c69fc997 100644 --- a/cylc/flow/network/schema.py +++ b/cylc/flow/network/schema.py @@ -909,7 +909,7 @@ class Meta: is_held = Boolean() is_queued = Boolean() is_runahead = Boolean() - flow_label = String() + flow_nums = String() depth = Int() job_submits = Int() outputs = List( @@ -1735,8 +1735,8 @@ class Arguments: task = TaskID( description='Stop after this task succeeds.' ) - flow_label = String( - description='Label of flow to sterilise.' + flow_num = Int( + description='Number of flow to stop.' ) result = GenericScalar() @@ -1860,6 +1860,7 @@ class Arguments(TaskMutation.Arguments): default_value=[TASK_OUTPUT_SUCCEEDED], description='List of task outputs to satisfy.' 
) + flow_num = Int() class Trigger(Mutation, TaskMutation): @@ -1877,6 +1878,7 @@ class Meta: class Arguments(TaskMutation.Arguments): reflow = Boolean() + flow_descr = String() def _mut_field(cls): diff --git a/cylc/flow/pathutil.py b/cylc/flow/pathutil.py index d2ca4619037..85428c36942 100644 --- a/cylc/flow/pathutil.py +++ b/cylc/flow/pathutil.py @@ -42,19 +42,19 @@ def expand_path(*args: Union[Path, str]) -> str: def get_remote_workflow_run_dir( - flow_name: Union[Path, str], *args: Union[Path, str] + workflow_name: Union[Path, str], *args: Union[Path, str] ) -> str: """Return remote workflow run directory, joining any extra args, NOT expanding vars or user.""" - return os.path.join(_CYLC_RUN_DIR, flow_name, *args) + return os.path.join(_CYLC_RUN_DIR, workflow_name, *args) def get_remote_workflow_run_job_dir( - flow_name: Union[Path, str], *args: Union[Path, str] + workflow_name: Union[Path, str], *args: Union[Path, str] ) -> str: """Return remote workflow job log directory, joining any extra args, NOT expanding vars or user.""" - return get_remote_workflow_run_dir(flow_name, 'log', 'job', *args) + return get_remote_workflow_run_dir(workflow_name, 'log', 'job', *args) def get_cylc_run_dir() -> str: @@ -63,14 +63,14 @@ def get_cylc_run_dir() -> str: def get_workflow_run_dir( - flow_name: Union[Path, str], *args: Union[Path, str] + workflow_name: Union[Path, str], *args: Union[Path, str] ) -> str: """Return local workflow run directory, joining any extra args, and expanding vars and user. Does not check that the directory exists. """ - return expand_path(_CYLC_RUN_DIR, flow_name, *args) + return expand_path(_CYLC_RUN_DIR, workflow_name, *args) def get_workflow_run_job_dir(workflow, *args): @@ -143,7 +143,7 @@ def make_localhost_symlinks( """Creates symlinks for any configured symlink dirs from glbl_cfg. Args: rund: the entire run directory path - named_sub_dir: e.g flow_name/run1 + named_sub_dir: e.g workflow_name/run1 symlink_conf: Symlinks dirs configuration passed from cli Returns: @@ -179,7 +179,7 @@ def make_localhost_symlinks( def get_dirs_to_symlink( install_target: str, - flow_name: str, + workflow_name: str, symlink_conf: Optional[Dict[str, Dict[str, Any]]] = None ) -> Dict[str, Any]: """Returns dictionary of directories to symlink. 
@@ -202,12 +202,14 @@ def get_dirs_to_symlink( return dirs_to_symlink base_dir = symlink_conf[install_target]['run'] if base_dir: - dirs_to_symlink['run'] = os.path.join(base_dir, 'cylc-run', flow_name) + dirs_to_symlink['run'] = os.path.join( + base_dir, 'cylc-run', workflow_name) for dir_ in ['log', 'share', 'share/cycle', 'work']: link = symlink_conf[install_target].get(dir_, None) if (not link) or link == base_dir: continue - dirs_to_symlink[dir_] = os.path.join(link, 'cylc-run', flow_name, dir_) + dirs_to_symlink[dir_] = os.path.join( + link, 'cylc-run', workflow_name, dir_) return dirs_to_symlink diff --git a/cylc/flow/rundb.py b/cylc/flow/rundb.py index ff951170449..96443187485 100644 --- a/cylc/flow/rundb.py +++ b/cylc/flow/rundb.py @@ -173,6 +173,7 @@ class CylcWorkflowDAO: TABLE_BROADCAST_STATES = "broadcast_states" TABLE_INHERITANCE = "inheritance" TABLE_WORKFLOW_PARAMS = "workflow_params" + TABLE_WORKFLOW_FLOWS = "workflow_flows" TABLE_WORKFLOW_TEMPLATE_VARS = "workflow_template_vars" TABLE_TASK_JOBS = "task_jobs" TABLE_TASK_EVENTS = "task_events" @@ -210,6 +211,11 @@ class CylcWorkflowDAO: ["key", {"is_primary_key": True}], ["value"], ], + TABLE_WORKFLOW_FLOWS: [ + ["flow_num", {"datatype": "INTEGER", "is_primary_key": True}], + ["start_time"], + ["description"], + ], TABLE_WORKFLOW_TEMPLATE_VARS: [ ["key", {"is_primary_key": True}], ["value"], @@ -262,7 +268,7 @@ class CylcWorkflowDAO: TABLE_TASK_POOL: [ ["cycle", {"is_primary_key": True}], ["name", {"is_primary_key": True}], - ["flow_label", {"is_primary_key": True}], + ["flow_nums", {"is_primary_key": True}], ["status"], ["is_held", {"datatype": "INTEGER"}], ], @@ -281,7 +287,7 @@ class CylcWorkflowDAO: TABLE_TASK_STATES: [ ["name", {"is_primary_key": True}], ["cycle", {"is_primary_key": True}], - ["flow_label", {"is_primary_key": True}], + ["flow_nums", {"is_primary_key": True}], ["time_created"], ["time_updated"], ["submit_num", {"datatype": "INTEGER"}], @@ -503,6 +509,21 @@ def select_workflow_params(self, callback): for row_idx, row in enumerate(self.connect().execute(stmt)): callback(row_idx, list(row)) + def select_workflow_flows(self, flow_nums): + """Return flow data for selected flows.""" + stmt = ( + f"SELECT flow_num, start_time, description " + f"FROM {self.TABLE_WORKFLOW_FLOWS} " + f"WHERE flow_num in ({','.join(str(f) for f in flow_nums)})" + ) + flows = {} + for flow_num, start_time, descr in self.connect().execute(stmt): + flows[flow_num] = { + "start_time": start_time, + "description": descr + } + return flows + def select_workflow_params_restart_count(self): """Return number of restarts in workflow_params table.""" stmt = rf""" @@ -670,12 +691,12 @@ def select_task_job_platforms(self): return {i[0] for i in self.connect().execute(stmt)} def select_submit_nums(self, name, point): - """Select submit_num and flow_label from task_states table. + """Select submit_num and flow_nums from task_states table. - Fetch submit number and flow label for spawning task name.point. + Fetch submit number and flow_nums for spawning task name.point. Return: { - flow_label: submit_num, + flow_nums: submit_num, ..., } @@ -687,13 +708,13 @@ def select_submit_nums(self, name, point): # Not an injection, simply putting the table name in the SQL query # expression as a string constant local to this module. stmt = ( # nosec - r"SELECT flow_label,submit_num FROM %(name)s" + r"SELECT flow_nums,submit_num FROM %(name)s" r" WHERE name==? AND cycle==?" 
) % {"name": self.TABLE_TASK_STATES} ret = {} - for flow_label, submit_num in self.connect().execute( + for flow_nums, submit_num in self.connect().execute( stmt, (name, point,)): - ret[flow_label] = submit_num + ret[flow_nums] = submit_num return ret def select_xtriggers_for_restart(self, callback): @@ -738,7 +759,7 @@ def select_task_pool_for_restart(self, callback): SELECT %(task_pool)s.cycle, %(task_pool)s.name, - %(task_pool)s.flow_label, + %(task_pool)s.flow_nums, %(task_late_flags)s.value, %(task_pool)s.status, %(task_pool)s.is_held, @@ -755,7 +776,7 @@ def select_task_pool_for_restart(self, callback): %(task_states)s ON %(task_pool)s.cycle == %(task_states)s.cycle AND %(task_pool)s.name == %(task_states)s.name AND - %(task_pool)s.flow_label == %(task_states)s.flow_label + %(task_pool)s.flow_nums == %(task_states)s.flow_nums LEFT OUTER JOIN %(task_late_flags)s ON %(task_pool)s.cycle == %(task_late_flags)s.cycle AND diff --git a/cylc/flow/scheduler.py b/cylc/flow/scheduler.py index 70c4a4942ae..8af46d50e2a 100644 --- a/cylc/flow/scheduler.py +++ b/cylc/flow/scheduler.py @@ -39,12 +39,15 @@ from metomi.isodatetime.parsers import TimePointParser -from cylc.flow import LOG, main_loop, ID_DELIM, __version__ as CYLC_VERSION +from cylc.flow import ( + LOG, main_loop, ID_DELIM, __version__ as CYLC_VERSION +) from cylc.flow.broadcast_mgr import BroadcastMgr from cylc.flow.cfgspec.glbl_cfg import glbl_cfg from cylc.flow.config import WorkflowConfig from cylc.flow.cycling.loader import get_point from cylc.flow.data_store_mgr import DataStoreMgr, parse_job_item +from cylc.flow.flow_mgr import FlowMgr from cylc.flow.exceptions import ( CommandFailedError, CyclingError, CylcError, UserInputError ) @@ -174,6 +177,7 @@ class Scheduler: workflow_db_mgr: WorkflowDatabaseManager broadcast_mgr: BroadcastMgr xtrigger_mgr: XtriggerManager + flow_mgr: FlowMgr # queues command_queue: Queue @@ -324,6 +328,7 @@ async def initialise(self): self.data_store_mgr = DataStoreMgr(self) self.broadcast_mgr = BroadcastMgr( self.workflow_db_mgr, self.data_store_mgr) + self.flow_mgr = FlowMgr(self.workflow_db_mgr) # *** Network Related *** # TODO: this in zmq asyncio context? @@ -458,7 +463,9 @@ async def configure(self): self.config, self.workflow_db_mgr, self.task_events_mgr, - self.data_store_mgr) + self.data_store_mgr, + self.flow_mgr + ) self.is_reloaded = False self.data_store_mgr.initiate_data_model() @@ -653,7 +660,12 @@ async def run(self): def _load_pool_from_tasks(self): """Load task pool with specified tasks, for a new run.""" LOG.info(f"Start task: {self.options.starttask}") - self.pool.force_trigger_tasks(self.options.starttask, True) + # flow number set in this call: + self.pool.force_trigger_tasks( + self.options.starttask, + reflow=True, + flow_descr=f"original, from {self.options.starttask}" + ) def _load_pool_from_point(self): """Load task pool for a cycle point, for a new run. @@ -670,7 +682,9 @@ def _load_pool_from_point(self): start_type = "Warm" if self.options.startcp else "Cold" LOG.info(f"{start_type} start from {self.config.start_point}") - flow_label = self.pool.flow_label_mgr.get_new_label() + flow_num = self.flow_mgr.get_new_flow( + f"original from {self.config.start_point}" + ) for name in self.config.get_task_name_list(): if self.config.start_point is None: # No start cycle point at which to load cycling tasks. 
@@ -689,7 +703,8 @@ def _load_pool_from_point(self):
             if not parent_points or all(
                     x < self.config.start_point for x in parent_points):
                 self.pool.add_to_pool(
-                    TaskProxy(tdef, point, flow_label))
+                    TaskProxy(tdef, point, {flow_num})
+                )
 
     def _load_pool_from_db(self):
         """Load task pool from DB, for a restart."""
@@ -711,6 +726,7 @@ def _load_pool_from_db(self):
         self.workflow_db_mgr.pri_dao.select_abs_outputs_for_restart(
             self.pool.load_abs_outputs_for_restart)
         self.pool.load_db_tasks_to_hold()
+        self.pool.update_flow_mgr()
 
     def restart_remote_init(self):
         """Remote init for all submitted/running tasks in the pool."""
@@ -857,10 +873,10 @@ def command_stop(
             # NOTE clock_time YYYY/MM/DD-HH:mm back-compat removed
             clock_time: Optional[str] = None,
             task: Optional[str] = None,
-            flow_label: Optional[str] = None
+            flow_num: Optional[int] = None
     ) -> None:
-        if flow_label:
-            self.pool.stop_flow(flow_label)
+        if flow_num:
+            self.pool.stop_flow(flow_num)
             return
 
         if cycle_point:
@@ -935,7 +951,7 @@ def command_kill_tasks(self, items=None):
         if self.config.run_mode('simulation'):
             for itask in itasks:
                 if itask.state(*TASK_STATUSES_ACTIVE):
-                    itask.state.reset(TASK_STATUS_FAILED)
+                    itask.state_reset(TASK_STATUS_FAILED)
                     self.data_store_mgr.delta_task_state(itask)
             return len(bad_items)
         self.task_job_mgr.kill_task_jobs(self.workflow, itasks)
@@ -1133,6 +1149,7 @@ def _load_workflow_params(self, row_idx, row):
         * Workflow UUID.
         * A flag to indicate if the workflow should be paused or not.
         * Original workflow run time zone.
+        * Flow counter.
         """
         if row_idx == 0:
             LOG.info('LOADING workflow parameters')
@@ -1196,6 +1213,9 @@ def _load_workflow_params(self, row_idx, row):
         elif key == self.workflow_db_mgr.KEY_CYCLE_POINT_TIME_ZONE:
             self.options.cycle_point_tz = value
             LOG.info(f"+ cycle point time zone = {value}")
+        elif key == self.workflow_db_mgr.KEY_FLOW_COUNTER:
+            self.flow_mgr.counter = int(value)
+            LOG.info(f"+ flow counter = {value}")
 
     def _load_template_vars(self, _, row):
         """Load workflow start up template variables."""
@@ -1257,10 +1277,10 @@ def release_queued_tasks(self):
                 self.client_pub_key_dir,
                 self.config.run_mode('simulation')
         ):
-            # TODO log flow labels here (beware effect on ref tests)
+            # (Not using f"{itask}" here to avoid breaking func tests)
             LOG.info(
-                '[%s] -triggered off %s',
-                itask, itask.state.get_resolved_dependencies()
+                f"[{itask.identity}] -triggered off "
+                f"{itask.state.get_resolved_dependencies()}"
             )
 
     def process_workflow_db_queue(self):
@@ -1286,7 +1306,7 @@ def late_tasks_check(self):
                     self.task_events_mgr.EVENT_LATE,
                     time2str(itask.get_late_time()))
                 itask.is_late = True
-                LOG.warning('[%s] -%s', itask, msg)
+                LOG.warning(f"[{itask}] {msg}")
                 self.task_events_mgr.setup_event_handlers(
                     itask, self.task_events_mgr.EVENT_LATE, msg)
                 self.workflow_db_mgr.put_insert_task_late_flags(itask)
@@ -1823,13 +1843,13 @@ def resume_workflow(self, quiet: bool = False) -> None:
         self.workflow_db_mgr.delete_workflow_paused()
         self.update_data_store()
 
-    def command_force_trigger_tasks(self, items, reflow=False):
+    def command_force_trigger_tasks(self, items, reflow, flow_descr):
         """Trigger tasks."""
-        return self.pool.force_trigger_tasks(items, reflow)
+        return self.pool.force_trigger_tasks(items, reflow, flow_descr)
 
-    def command_force_spawn_children(self, items, outputs):
+    def command_force_spawn_children(self, items, outputs, flow_num):
         """Force spawn task successors."""
-        return self.pool.force_spawn_children(items, outputs)
+        return self.pool.force_spawn_children(items, outputs, flow_num)
 
     def 
_update_profile_info(self, category, amount, amount_format="%s"): """Update the 1, 5, 15 minute dt averages for a given category.""" diff --git a/cylc/flow/scheduler_cli.py b/cylc/flow/scheduler_cli.py index fc1fedadee3..d19e057892c 100644 --- a/cylc/flow/scheduler_cli.py +++ b/cylc/flow/scheduler_cli.py @@ -93,7 +93,7 @@ """ -FLOW_NAME_ARG_DOC = ("WORKFLOW", "Workflow name or ID") +WORKFLOW_NAME_ARG_DOC = ("WORKFLOW", "Workflow name or ID") RESUME_MUTATION = ''' mutation ( @@ -116,7 +116,7 @@ def get_option_parser(add_std_opts=False): icp=True, jset=True, comms=True, - argdoc=[FLOW_NAME_ARG_DOC]) + argdoc=[WORKFLOW_NAME_ARG_DOC]) parser.add_option( "-n", "--no-detach", "--non-daemon", diff --git a/cylc/flow/scripts/dump.py b/cylc/flow/scripts/dump.py index 3aee97482da..08622bdcc1e 100755 --- a/cylc/flow/scripts/dump.py +++ b/cylc/flow/scripts/dump.py @@ -58,7 +58,7 @@ isHeld isQueued isRunahead - flowLabel + flowNums firstParent { id } @@ -152,8 +152,8 @@ def get_option_parser(): "-t", "--tasks", help="Task states only.", action="store_const", const="tasks", dest="disp_form") parser.add_option( - "-f", "--flow", help="Print flow label with tasks.", - action="store_true", default=False, dest="flow") + "-f", "--flows", help="Print flow numbers with tasks.", + action="store_true", default=False, dest="show_flows") parser.add_option( "-r", "--raw", "--raw-format", help='Display raw format.', @@ -265,8 +265,8 @@ def main(_, options: 'Values', workflow: str) -> None: else 'not-queued') values.append('runahead' if item['isRunahead'] else 'not-runahead') - if options.flow: - values.append(item['flowLabel']) + if options.show_flows: + values.append(item['flowNums']) print(', '.join(values)) except Exception as exc: raise CylcError( diff --git a/cylc/flow/scripts/install.py b/cylc/flow/scripts/install.py index f0b79003fd7..a0083c5e389 100755 --- a/cylc/flow/scripts/install.py +++ b/cylc/flow/scripts/install.py @@ -91,11 +91,11 @@ def get_option_parser(): parser.add_option( "--flow-name", - help="Install into ~/cylc-run//runN ", + help="Install into ~/cylc-run//runN ", action="store", - metavar="FLOW_NAME", + metavar="WORKFLOW_NAME", default=None, - dest="flow_name") + dest="workflow_name") parser.add_option( "--directory", "-C", @@ -127,7 +127,7 @@ def get_option_parser(): parser.add_option( "--no-run-name", - help="Install the workflow directly into ~/cylc-run/", + help="Install the workflow directly into ~/cylc-run/", action="store_true", default=False, dest="no_run_name") @@ -156,7 +156,7 @@ def install( parser.error( "WORKFLOW_NAME and --directory are mutually exclusive.") source = search_install_source_dirs(reg) - flow_name = opts.flow_name or reg + workflow_name = opts.workflow_name or reg for entry_point in iter_entry_points( 'cylc.pre_configure' @@ -180,8 +180,8 @@ def install( cli_symdirs = {} elif opts.symlink_dirs: cli_symdirs = parse_cli_sym_dirs(opts.symlink_dirs) - source_dir, rundir, _flow_name = install_workflow( - flow_name=flow_name, + source_dir, rundir, _workflow_name = install_workflow( + workflow_name=workflow_name, source=source, run_name=opts.run_name, no_run_name=opts.no_run_name, diff --git a/cylc/flow/scripts/set_outputs.py b/cylc/flow/scripts/set_outputs.py index 42530de743f..a5dac1a2262 100755 --- a/cylc/flow/scripts/set_outputs.py +++ b/cylc/flow/scripts/set_outputs.py @@ -18,15 +18,13 @@ """cylc set-outputs [OPTIONS] ARGS -Override the outputs of tasks in a running workflow. 
- -Tell the scheduler that specified outputs (the "succeeded" output by default) +Tell the scheduler that specified (or "succeeded", by default) outputs of tasks are complete. Downstream tasks will be spawned or updated just as if the outputs were completed normally. -The --output=OUTPUT option can be used multiple times on the command line. +The --output option can be used multiple times on the command line. """ @@ -42,11 +40,13 @@ $wFlows: [WorkflowID]!, $tasks: [NamespaceIDGlob]!, $outputs: [String], + $flowNum: Int, ) { setOutputs ( workflows: $wFlows, tasks: $tasks, - outputs: $outputs + outputs: $outputs, + flowNum: $flowNum, ) { result } @@ -60,15 +60,24 @@ def get_option_parser(): argdoc=[ ("WORKFLOW", "Workflow name or ID"), ('TASK-GLOB [...]', 'Task match pattern')]) + parser.add_option( - "--output", metavar="OUTPUT", - help="Set task output OUTPUT completed, defaults to 'succeeded'.", + "-o", "--output", metavar="OUTPUT", + help="Set OUTPUT (default \"succeeded\") completed.", action="append", dest="outputs") + + parser.add_option( + "-f", "--flow", metavar="FLOW", + help="Number of the flow to attribute the outputs.", + action="store", default=None, dest="flow_num") + return parser @cli_function(get_option_parser) def main(parser: COP, options: 'Values', reg: str, *task_globs: str) -> None: + if options.flow_num is None: + parser.error("--flow=FLOW is required.") reg, _ = parse_reg(reg) pclient = get_client(reg, timeout=options.comms_timeout) @@ -78,6 +87,7 @@ def main(parser: COP, options: 'Values', reg: str, *task_globs: str) -> None: 'wFlows': [reg], 'tasks': list(task_globs), 'outputs': options.outputs, + 'flowNum': options.flow_num } } diff --git a/cylc/flow/scripts/stop.py b/cylc/flow/scripts/stop.py index 9348364b122..fa677b9f1b4 100755 --- a/cylc/flow/scripts/stop.py +++ b/cylc/flow/scripts/stop.py @@ -61,7 +61,7 @@ $cyclePoint: CyclePoint, $clockTime: TimePoint, $task: TaskID, - $flowLabel: String, + $flowNum: Int, ) { stop ( workflows: $wFlows, @@ -69,7 +69,7 @@ cyclePoint: $cyclePoint, clockTime: $clockTime, task: $task, - flowLabel: $flowLabel + flowNum: $flowNum ) { result } @@ -122,12 +122,10 @@ def get_option_parser(): action="store_true", default=False, dest="kill") parser.add_option( - "--flow", metavar="LABEL", - help=( - "Stop a specified flow within a workflow from spawning " - "any further. The scheduler will shut down if LABEL is the " - "only flow."), - action="store", dest="flow_label") + "--flow", metavar="INT", + help="Stop flow number INT from spawning more tasks. 
" + "The scheduler will shut down if it is the only flow.", + action="store", dest="flow_num") parser.add_option( "-n", "--now", @@ -162,7 +160,7 @@ def main( if options.kill and options.now: parser.error("ERROR: --kill is not compatible with --now") - if options.flow_label and int(options.max_polls) > 0: + if options.flow_num and int(options.max_polls) > 0: parser.error("ERROR: --flow is not compatible with --max-polls") reg, _ = parse_reg(reg) @@ -198,7 +196,7 @@ def main( 'cyclePoint': cycle_point, 'clockTime': options.wall_clock, 'task': task, - 'flowLabel': options.flow_label, + 'flowNum': options.flow_num } } diff --git a/cylc/flow/scripts/trigger.py b/cylc/flow/scripts/trigger.py index 586282145f7..75e96014ed7 100755 --- a/cylc/flow/scripts/trigger.py +++ b/cylc/flow/scripts/trigger.py @@ -47,11 +47,13 @@ $wFlows: [WorkflowID]!, $tasks: [NamespaceIDGlob]!, $reflow: Boolean, + $flowDescr: String, ) { trigger ( workflows: $wFlows, tasks: $tasks, - reflow: $reflow + reflow: $reflow, + flowDescr: $flowDescr ) { result } @@ -67,9 +69,16 @@ def get_option_parser(): ('[TASK_GLOB ...]', 'Task matching patterns')]) parser.add_option( - "-r", "--reflow", - help="Start a new flow from the triggered task.", - action="store_true", default=False, dest="reflow") + "--reflow", action="store_true", + dest="reflow", default=False, + help="Start a new flow from the triggered task." + ) + + parser.add_option( + "--meta", metavar="DESCRIPTION", action="store", + dest="flow_descr", default="", + help="(with --reflow) a descriptive string for the new flow." + ) return parser @@ -77,6 +86,8 @@ def get_option_parser(): @cli_function(get_option_parser) def main(parser: COP, options: 'Values', workflow: str, *task_globs: str): """CLI for "cylc trigger".""" + if options.flow_descr and not options.reflow: + parser.error("--meta requires --reflow") workflow, _ = parse_reg(workflow) pclient = get_client(workflow, timeout=options.comms_timeout) @@ -86,6 +97,7 @@ def main(parser: COP, options: 'Values', workflow: str, *task_globs: str): 'wFlows': [workflow], 'tasks': list(task_globs), 'reflow': options.reflow, + 'flowDescr': options.flow_descr, } } diff --git a/cylc/flow/scripts/validate.py b/cylc/flow/scripts/validate.py index a0339310d85..c2bc900ef03 100755 --- a/cylc/flow/scripts/validate.py +++ b/cylc/flow/scripts/validate.py @@ -43,7 +43,6 @@ Options ) from cylc.flow.profiler import Profiler -from cylc.flow.task_pool import FlowLabelMgr from cylc.flow.task_proxy import TaskProxy from cylc.flow.templatevars import get_template_vars from cylc.flow.terminal import cli_function @@ -134,10 +133,9 @@ def main(parser: COP, options: 'Values', reg: str) -> None: # TODO - This is not exhaustive, it only uses the initial cycle point. 
if cylc.flow.flags.verbosity > 0: print('Instantiating tasks to check trigger expressions') - flow_label = FlowLabelMgr().get_new_label() for name, taskdef in cfg.taskdefs.items(): try: - itask = TaskProxy(taskdef, cfg.start_point, flow_label) + itask = TaskProxy(taskdef, cfg.start_point) except TaskProxySequenceBoundsError: # Should already failed above mesg = 'Task out of bounds for %s: %s\n' % (cfg.start_point, name) diff --git a/cylc/flow/task_events_mgr.py b/cylc/flow/task_events_mgr.py index d0382e20fcb..47eb20e4561 100644 --- a/cylc/flow/task_events_mgr.py +++ b/cylc/flow/task_events_mgr.py @@ -251,7 +251,7 @@ def check_job_time(self, itask, now): msg += ' after %s' % intvl_as_str(itask.timeout - time_ref) itask.timeout = None # emit event only once if msg and event: - LOG.warning('[%s] -%s', itask, msg) + LOG.warning(f"[{itask}] {msg}") self.setup_event_handlers(itask, event, msg) return True else: @@ -293,20 +293,25 @@ def process_events(self, schd_ctx): # Set timer if timeout is None. if not timer.is_timeout_set(): if timer.next() is None: - LOG.warning("%s/%s/%02d %s failed" % ( - point, name, submit_num, key1)) + LOG.warning( + f"{point}/{name}/{submit_num:02d} {key1} failed" + ) self.remove_event_timer(id_key) continue # Report retries and delayed 1st try - tmpl = None + msg = None if timer.num > 1: - tmpl = "%s/%s/%02d %s failed, retrying in %s" + msg = ( + f"{key1} failed, " + f"retrying in {timer.delay_timeout_as_str()}" + ) elif timer.delay: - tmpl = "%s/%s/%02d %s will run after %s" - if tmpl: - LOG.debug(tmpl % ( - point, name, submit_num, key1, - timer.delay_timeout_as_str())) + msg = ( + f"{key1} will run after " + f"{timer.delay_timeout_as_str()}" + ) + if msg: + LOG.critical(f"{point}/{name}/{submit_num:02d} {msg}") # Ready to run? if not timer.is_delay_done() or ( # Avoid flooding user's mail box with mail notification. @@ -505,8 +510,8 @@ def process_message( itask.job_vacated = True # Believe this and change state without polling (could poll?). self.reset_inactivity_timer_func() - if itask.state.reset(TASK_STATUS_SUBMITTED): - itask.state.reset(is_queued=False) + if itask.state_reset(TASK_STATUS_SUBMITTED): + itask.state_reset(is_queued=False) self.data_store_mgr.delta_task_state(itask) self.data_store_mgr.delta_task_queued(itask) self._reset_job_timers(itask) @@ -527,9 +532,7 @@ def process_message( # * poll messages that repeat previous results # Note that all messages are logged already at the top. # No state change. - LOG.debug( - '[%s] status=%s: unhandled: %s', - itask, itask.state.status, message) + LOG.debug(f"[{itask}] unhandled: {message}") if severity in LOG_LEVELS.values(): severity = getLevelName(severity) self._db_events_insert( @@ -555,16 +558,16 @@ def _process_message_check( Return True if `.process_message` should contine, False otherwise. 
""" if self.timestamp: - timestamp = " at %s " % event_time + timestamp = f" at {event_time}" else: timestamp = "" - logfmt = r'[%s] status=%s: %s%s%s for job(%02d) flow(%s)' if flag == self.FLAG_RECEIVED and submit_num != itask.submit_num: # Ignore received messages from old jobs LOG.warning( - logfmt + r' != current job(%02d)', - itask, itask.state, self.FLAG_RECEIVED_IGNORED, message, - timestamp, submit_num, itask.flow_label, itask.submit_num) + f"[{itask}] " + f"{self.FLAG_RECEIVED_IGNORED}{message}{timestamp} " + f"for job({submit_num:02d}) != job({itask.submit_num:02d})" + ) return False if ( @@ -591,18 +594,21 @@ def _process_message_check( # (caused by polling overlapping with task failure) if flag == self.FLAG_RECEIVED: LOG.warning( - logfmt, - itask, itask.state, self.FLAG_RECEIVED_IGNORED, message, - timestamp, submit_num, itask.flow_label) + f"[{itask}] " + f"{self.FLAG_RECEIVED_IGNORED}{message}{timestamp}" + ) + else: LOG.warning( - logfmt, - itask, itask.state, self.FLAG_POLLED_IGNORED, message, - timestamp, submit_num, itask.flow_label) + f"[{itask}] " + f"{self.FLAG_POLLED_IGNORED}{message}{timestamp}" + ) return False + LOG.log( - LOG_LEVELS.get(severity, INFO), logfmt, itask, itask.state, flag, - message, timestamp, submit_num, itask.flow_label) + LOG_LEVELS.get(severity, INFO), + f"[{itask}] {flag}{message}{timestamp}" + ) return True def setup_event_handlers(self, itask, event, message): @@ -845,7 +851,7 @@ def _retry_task(self, itask, wallclock_time, submit_retry=False): os.getenv("CYLC_WORKFLOW_RUN_DIR") ) itask.state.add_xtrigger(label) - if itask.state.reset(TASK_STATUS_WAITING): + if itask.state_reset(TASK_STATUS_WAITING): self.data_store_mgr.delta_task_state(itask) def _process_message_failed(self, itask, event_time, message): @@ -871,23 +877,19 @@ def _process_message_failed(self, itask, event_time, message): or itask.try_timers[TimerFlags.EXECUTION_RETRY].next() is None ): # No retry lined up: definitive failure. - if itask.state.reset(TASK_STATUS_FAILED): + if itask.state_reset(TASK_STATUS_FAILED): self.setup_event_handlers(itask, self.EVENT_FAILED, message) self.data_store_mgr.delta_task_state(itask) - LOG.critical( - "[%s] -job(%02d) %s", itask, itask.submit_num, "failed") + LOG.critical(f"[{itask}] failed") no_retries = True else: # There is an execution retry lined up. 
timer = itask.try_timers[TimerFlags.EXECUTION_RETRY] self._retry_task(itask, timer.timeout) delay_msg = f"retrying in {timer.delay_timeout_as_str()}" - if itask.state.is_held: - delay_msg = "held (%s)" % delay_msg - msg = "failed, %s" % (delay_msg) - LOG.info("[%s] -job(%02d) %s", itask, itask.submit_num, msg) - self.setup_event_handlers( - itask, self.EVENT_RETRY, f"{self.JOB_FAILED}, {delay_msg}") + LOG.warning(f"[{itask}] {delay_msg}") + msg = f"{self.JOB_FAILED}, {delay_msg}" + self.setup_event_handlers(itask, self.EVENT_RETRY, msg) self._reset_job_timers(itask) return no_retries @@ -895,7 +897,7 @@ def _process_message_started(self, itask, event_time): """Helper for process_message, handle a started message.""" if itask.job_vacated: itask.job_vacated = False - LOG.warning(f"[{itask}] -Vacated job restarted") + LOG.warning(f"[{itask}] Vacated job restarted") self.reset_inactivity_timer_func() job_d = get_task_job_id(itask.point, itask.tdef.name, itask.submit_num) self.data_store_mgr.delta_job_time(job_d, 'started', event_time) @@ -903,7 +905,7 @@ def _process_message_started(self, itask, event_time): itask.set_summary_time('started', event_time) self.workflow_db_mgr.put_update_task_jobs(itask, { "time_run": itask.summary['started_time_string']}) - if itask.state.reset(TASK_STATUS_RUNNING): + if itask.state_reset(TASK_STATUS_RUNNING): self.setup_event_handlers( itask, self.EVENT_STARTED, f'job {self.EVENT_STARTED}') self.data_store_mgr.delta_task_state(itask) @@ -929,7 +931,7 @@ def _process_message_succeeded(self, itask, event_time): itask.tdef.elapsed_times.append( itask.summary['finished_time'] - itask.summary['started_time']) - if itask.state.reset(TASK_STATUS_SUCCEEDED): + if itask.state_reset(TASK_STATUS_SUCCEEDED): self.setup_event_handlers( itask, self.EVENT_SUCCEEDED, f"job {self.EVENT_SUCCEEDED}") self.data_store_mgr.delta_task_state(itask) @@ -941,7 +943,7 @@ def _process_message_submit_failed(self, itask, event_time): Return True if no retries (hence go to the submit-failed state). """ no_retries = False - LOG.error('[%s] -%s', itask, self.EVENT_SUBMIT_FAILED) + LOG.critical(f"[{itask}] {self.EVENT_SUBMIT_FAILED}") if event_time is None: event_time = get_current_time_string() self.workflow_db_mgr.put_update_task_jobs(itask, { @@ -959,7 +961,7 @@ def _process_message_submit_failed(self, itask, event_time): # No submission retry lined up: definitive failure. # See github #476. no_retries = True - if itask.state.reset(TASK_STATUS_SUBMIT_FAILED): + if itask.state_reset(TASK_STATUS_SUBMIT_FAILED): self.setup_event_handlers( itask, self.EVENT_SUBMIT_FAILED, f'job {self.EVENT_SUBMIT_FAILED}') @@ -968,27 +970,22 @@ def _process_message_submit_failed(self, itask, event_time): # There is a submission retry lined up. 
timer = itask.try_timers[TimerFlags.SUBMISSION_RETRY]
             self._retry_task(itask, timer.timeout, submit_retry=True)
-            delay_msg = f"submit-retrying in {timer.delay_timeout_as_str()}"
-            if itask.state.is_held:
-                delay_msg = f"held ({delay_msg})"
-            msg = "%s, %s" % (self.EVENT_SUBMIT_FAILED, delay_msg)
-            LOG.info("[%s] -job(%02d) %s", itask, itask.submit_num, msg)
-            self.setup_event_handlers(
-                itask, self.EVENT_SUBMIT_RETRY,
-                f"job {self.EVENT_SUBMIT_FAILED}, {delay_msg}")
+            delay_msg = f"retrying in {timer.delay_timeout_as_str()}"
+            LOG.warning(f"[{itask}] {delay_msg}")
+            msg = f"job {self.EVENT_SUBMIT_FAILED}, {delay_msg}"
+            self.setup_event_handlers(itask, self.EVENT_SUBMIT_RETRY, msg)
         self._reset_job_timers(itask)
         return no_retries
 
     def _process_message_submitted(self, itask, event_time):
         """Helper for process_message, handle a submit-succeeded message."""
         with suppress(KeyError):
+            summary = itask.summary
             LOG.info(
-                '[%s] -job[%02d] submitted to %s:%s[%s]',
-                itask,
-                itask.summary['submit_num'],
-                itask.summary['platforms_used'][itask.summary['submit_num']],
-                itask.summary['job_runner_name'],
-                itask.summary['submit_method_id']
+                f"[{itask}] submitted to "
+                f"{summary['platforms_used'][itask.submit_num]}:"
+                f"{summary['job_runner_name']}"
+                f"[{summary['submit_method_id']}]"
             )
         self.workflow_db_mgr.put_update_task_jobs(itask, {
             "time_submit_exit": event_time,
@@ -999,7 +996,7 @@ def _process_message_submitted(self, itask, event_time):
             # Simulate job execution at this point.
             itask.set_summary_time('submitted', event_time)
             itask.set_summary_time('started', event_time)
-            if itask.state.reset(TASK_STATUS_RUNNING):
+            if itask.state_reset(TASK_STATUS_RUNNING):
                 self.data_store_mgr.delta_task_state(itask)
             itask.state.outputs.set_completion(TASK_OUTPUT_STARTED, True)
             self.data_store_mgr.delta_task_output(itask, TASK_OUTPUT_STARTED)
@@ -1017,8 +1014,8 @@ def _process_message_submitted(self, itask, event_time):
         if itask.state.status == TASK_STATUS_PREPARING:
             # The job started message can (rarely) come in before the submit
             # command returns - in which case do not go back to 'submitted'.
-            if itask.state.reset(TASK_STATUS_SUBMITTED):
-                itask.state.reset(is_queued=False)
+            if itask.state_reset(TASK_STATUS_SUBMITTED):
+                itask.state_reset(is_queued=False)
                 self.setup_event_handlers(
                     itask, self.EVENT_SUBMITTED, f'job {self.EVENT_SUBMITTED}')
                 self.data_store_mgr.delta_task_state(itask)
@@ -1188,16 +1185,17 @@ def _setup_custom_event_handlers(self, itask, event, message):
             # fmt: on
                 cmd = handler % (handler_data)
             except KeyError as exc:
-                LOG.error(
+                LOG.critical(
                     f"{itask.point}/{itask.tdef.name}/{itask.submit_num:02d} "
-                    f"{key1} bad template: {exc}")
+                    f"{key1} bad template: {exc}"
+                )
                 continue
 
             if cmd == handler:
                 # Nothing substituted, assume classic interface
                 cmd = (f"{handler} '{event}' '{self.workflow}' "
                        f"'{itask.identity}' '{message}'")
-            LOG.debug(f"[{itask}] -Queueing {event} handler: {cmd}")
+            LOG.debug(f"[{itask}] Queueing {event} handler: {cmd}")
             self.add_event_timer(
                 id_key,
                 TaskActionTimer(
@@ -1262,7 +1261,7 @@ def _reset_job_timers(self, itask):
             timeout_str = None
         itask.poll_timer = TaskActionTimer(ctx=ctx, delays=delays)
         # Log timeout and polling schedule
-        message = 'health check settings: %s=%s' % (timeout_key, timeout_str)
+        message = 'health: %s=%s' % (timeout_key, timeout_str)
         # Attempt to group identical consecutive delays as N*DELAY,...
         if itask.poll_timer.delays:
             items = []  # [(number of item - 1, item), ...]
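
[editor note, not part of the patch] The _reset_job_timers hunk that
continues below compresses runs of identical polling intervals into the
N*DELAY form used in the new "health:" log message. A standalone sketch of
that grouping (illustrative only; the real code formats ISO 8601 durations
with intvl_as_str rather than plain seconds):

    from itertools import groupby

    def group_delays(delays):
        # e.g. [30, 30, 30, 600] -> "3*30,600,..."
        parts = []
        for delay, run in groupby(delays):
            num = len(list(run))
            parts.append(f"{num}*{delay}" if num > 1 else str(delay))
        return ",".join(parts) + ",..."

    assert group_delays([30, 30, 30, 600]) == "3*30,600,..."
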
@@ -1277,7 +1276,7 @@ def _reset_job_timers(self, itask): message += '%d*' % (num + 1) message += '%s,' % intvl_as_str(item) message += '...' - LOG.info('[%s] -%s', itask, message) + LOG.info(f"[{itask}] {message}") # Set next poll time self.check_poll_time(itask) diff --git a/cylc/flow/task_job_mgr.py b/cylc/flow/task_job_mgr.py index dd07d6a1569..052dc72a175 100644 --- a/cylc/flow/task_job_mgr.py +++ b/cylc/flow/task_job_mgr.py @@ -166,8 +166,9 @@ def check_task_jobs(self, workflow, task_pool): poll_tasks.add(itask) if itask.poll_timer.delay is not None: LOG.info( - '[%s] -poll now, (next in %s)', - itask, itask.poll_timer.delay_timeout_as_str()) + f"[{itask}] poll now, (next in " + f"{itask.poll_timer.delay_timeout_as_str()})" + ) if poll_tasks: self.poll_task_jobs(workflow, poll_tasks) @@ -180,11 +181,11 @@ def kill_task_jobs(self, workflow, itasks): to_kill_tasks = [] for itask in itasks: if itask.state(*TASK_STATUSES_ACTIVE): - itask.state.reset(is_held=True) + itask.state_reset(is_held=True) self.data_store_mgr.delta_task_held(itask) to_kill_tasks.append(itask) else: - LOG.warning('skipping %s: task not killable' % itask.identity) + LOG.warning(f"[{itask}] not killable") self._run_job_cmd( self.JOBS_KILL, workflow, to_kill_tasks, self._kill_task_jobs_callback, @@ -426,9 +427,7 @@ def submit_task_jobs(self, workflow, itasks, curve_auth, done_tasks.extend(itasks) for itask in itasks: # Log and persist - LOG.info( - '[%s] -submit-num=%02d, host=%s', - itask, itask.submit_num, host) + LOG.info(f"[{itask}] host={host}") self.workflow_db_mgr.put_insert_task_jobs(itask, { 'is_manual_submit': itask.is_manual_submit, 'try_num': itask.get_try_num(), @@ -661,7 +660,7 @@ def _job_cmd_out_callback(workflow, itask, cmd_ctx, line): handle.write((host + line).encode()) except IOError as exc: LOG.warning("%s: write failed\n%s" % (job_activity_log, exc)) - LOG.warning("[%s] -%s%s", itask, host, line) + LOG.warning(f"[{itask}] {host}{line}") def _kill_task_jobs_callback(self, ctx, workflow, itasks): """Callback when kill tasks command exits.""" @@ -703,11 +702,11 @@ def _kill_task_job_callback(self, workflow, itask, cmd_ctx, line): if ctx.ret_code: ctx.cmd = cmd_ctx.cmd # print original command on failure log_task_job_activity(ctx, workflow, itask.point, itask.tdef.name) - log_lvl = INFO - log_msg = 'killed' + log_lvl = WARNING + log_msg = 'job killed' if ctx.ret_code: # non-zero exit status log_lvl = WARNING - log_msg = 'kill failed' + log_msg = 'job kill failed' itask.state.kill_failed = True elif itask.state(TASK_STATUS_SUBMITTED): self.task_events_mgr.process_message( @@ -724,8 +723,7 @@ def _kill_task_job_callback(self, workflow, itask, cmd_ctx, line): self.data_store_mgr.delta_job_msg( get_task_job_id(itask.point, itask.tdef.name, itask.submit_num), log_msg) - LOG.log(log_lvl, "[%s] -job(%02d) %s" % ( - itask.identity, itask.submit_num, log_msg)) + LOG.log(log_lvl, f"[{itask}] {log_msg}") def _manip_task_jobs_callback( self, ctx, workflow, itasks, summary_callback, @@ -1136,13 +1134,13 @@ def _prep_submit_task_job(self, workflow, itask, check_syntax=True): and rtconfig['platform'] != platform_n ): LOG.debug( - f"for task {itask.identity}: platform = " + f"[{itask}] platform = " f"{rtconfig['platform']} evaluated as {platform_n}" ) rtconfig['platform'] = platform_n elif platform_n is None and rtconfig['remote']['host'] != host_n: LOG.debug( - f"for task {itask.identity}: host = " + f"[{itask}] host = " f"{rtconfig['remote']['host']} evaluated as {host_n}" ) rtconfig['remote']['host'] = host_n @@ 
-1196,7 +1194,6 @@ def _prep_submit_task_job(self, workflow, itask, check_syntax=True): def _prep_submit_task_job_error(self, workflow, itask, action, exc): """Helper for self._prep_submit_task_job. On error.""" - LOG.debug("submit_num %s" % itask.submit_num) log_task_job_activity( SubProcContext(self.JOBS_SUBMIT, action, err=exc, ret_code=1), workflow, @@ -1257,7 +1254,7 @@ def _prep_submit_task_job_impl(self, workflow, itask, rtconfig): 'pre-script': scripts[0], 'script': scripts[1], 'submit_num': itask.submit_num, - 'flow_label': itask.flow_label, + 'flow_nums': itask.flow_nums, 'workflow_name': workflow, 'task_id': itask.identity, 'try_num': itask.get_try_num(), diff --git a/cylc/flow/task_pool.py b/cylc/flow/task_pool.py index 046b4ccce93..34197c5c704 100644 --- a/cylc/flow/task_pool.py +++ b/cylc/flow/task_pool.py @@ -18,7 +18,6 @@ from contextlib import suppress from collections import Counter -from string import ascii_letters import json from time import time from typing import Dict, Iterable, List, Optional, Set, TYPE_CHECKING, Tuple @@ -63,90 +62,11 @@ from cylc.flow.taskdef import TaskDef from cylc.flow.task_events_mgr import TaskEventsManager from cylc.flow.workflow_db_mgr import WorkflowDatabaseManager + from cylc.flow.flow_mgr import FlowMgr Pool = Dict['PointBase', Dict[str, TaskProxy]] -class FlowLabelMgr: - """ - Manage flow labels consisting of a string of one or more letters [a-zA-Z]. - - Flow labels are task attributes representing the flow the task belongs to, - passed down to spawned children. If a new flow is started, a new single - character label is chosen randomly. This allows for 52 simultaneous flows - (which should be more than enough) with labels that are easy to work with. - - Flows merge locally when a task can't be spawned because it already exists - in the pool with a different label. We merge the labels at such tasks so - that downstream events can be considered to belong to either of the - original flows. Merged labels are simple strings that contains the - component labels, e.g. if flow "a" merges with flow "b" the merged result - is "ab" (or "ba", it doesn't matter which). - - """ - def __init__(self): - """Store available and used labels.""" - self.avail = set(ascii_letters) - self.inuse = set() - - def get_num_inuse(self): - """Return the number of labels currently in use.""" - return len(list(self.inuse)) - - def make_avail(self, labels): - """Return labels (set) to the pool of available labels.""" - LOG.info("returning flow label(s) %s", labels) - for label in labels: - with suppress(KeyError): - self.inuse.remove(label) - self.avail.add(label) - - def get_new_label(self): - """Return a new label, or None if we've run out.""" - try: - label = self.avail.pop() - except KeyError: - return None - self.inuse.add(label) - return label - - @staticmethod - def get_common_labels(labels): - """Return list of common labels.""" - set_labels = [set(lab) for lab in labels] - return set.intersection(*set_labels) - - @staticmethod - def merge_labels(lab1, lab2): - """Return the label representing both lab1 and lab2. - - Note the incoming labels could already be merged. - """ - if lab1 == lab2: - return lab1 - labs1 = set(lab1) - labs2 = set(lab2) - return ''.join(labs1.union(labs2)) - - @staticmethod - def unmerge_labels(prune, target): - """Unmerge prune from target.""" - for char in list(prune): - target = target.replace(char, '') - return target - - @staticmethod - def match_labels(lab1, lab2): - """Return True if lab1 and lab2 have any labels in common. 
- - If they do, the owner tasks can be considered part of the same flow. - Note the incoming labels could already be merged. - """ - labs1 = set(lab1) - labs2 = set(lab2) - return bool(labs1.intersection(labs2)) - - class TaskPool: """Task pool of a workflow.""" @@ -158,7 +78,8 @@ def __init__( config: 'WorkflowConfig', workflow_db_mgr: 'WorkflowDatabaseManager', task_events_mgr: 'TaskEventsManager', - data_store_mgr: 'DataStoreMgr' + data_store_mgr: 'DataStoreMgr', + flow_mgr: 'FlowMgr' ) -> None: self.config: 'WorkflowConfig' = config @@ -168,7 +89,7 @@ def __init__( # TODO this is ugly: self.task_events_mgr.spawn_func = self.spawn_on_output self.data_store_mgr: 'DataStoreMgr' = data_store_mgr - self.flow_label_mgr = FlowLabelMgr() + self.flow_mgr: 'FlowMgr' = flow_mgr self.do_reload = False self.custom_runahead_limit = self.config.get_custom_runahead_limit() @@ -270,11 +191,15 @@ def add_to_pool(self, itask, is_new=True): if is_new: # Add row to "task_states" table: - self.workflow_db_mgr.put_insert_task_states(itask, { - "time_created": get_current_time_string(), - "time_updated": get_current_time_string(), - "status": itask.state.status, - "flow_label": itask.flow_label}) + self.workflow_db_mgr.put_insert_task_states( + itask, + { + "time_created": get_current_time_string(), + "time_updated": get_current_time_string(), + "status": itask.state.status, + "flow_nums": json.dumps(list(itask.flow_nums)) + } + ) # Add row to "task_outputs" table: if itask.state.outputs.has_custom_triggers(): self.workflow_db_mgr.put_insert_task_outputs(itask) @@ -431,6 +356,12 @@ def compute_runahead(self): return runahead_limit_point + def update_flow_mgr(self): + flow_nums_seen = set() + for itask in self.get_all_tasks(): + flow_nums_seen.update(itask.flow_nums) + self.flow_mgr.load_flows_db(flow_nums_seen) + def load_abs_outputs_for_restart(self, row_idx, row): cycle, name, output = row self.abs_outputs_done.add((name, cycle, output)) @@ -446,13 +377,13 @@ def load_db_task_pool_for_restart(self, row_idx, row): if row_idx == 0: LOG.info("LOADING task proxies") # Create a task proxy corresponding to this DB entry. 
- (cycle, name, flow_label, is_late, status, is_held, submit_num, _, + (cycle, name, flow_nums, is_late, status, is_held, submit_num, _, platform_name, time_submit, time_run, timeout, outputs_str) = row try: itask = TaskProxy( self.config.get_taskdef(name), get_point(cycle), - flow_label, + set(json.loads(flow_nums)), is_held=is_held, submit_num=submit_num, is_late=bool(is_late)) @@ -508,8 +439,8 @@ def load_db_task_pool_for_restart(self, row_idx, row): for key, _ in itask_prereq.satisfied.items(): itask_prereq.satisfied[key] = sat[key] - itask.state.reset(status) - itask.state.reset(is_runahead=True) + itask.state_reset(status) + itask.state_reset(is_runahead=True) self.add_to_pool(itask, is_new=False) def load_db_task_action_timers(self, row_idx, row): @@ -542,14 +473,14 @@ def load_db_task_action_timers(self, row_idx, row): return LOG.info("+ %s.%s %s" % (name, cycle, ctx_key)) if ctx_key == "poll_timer": - itask = self._get_task_by_id(id_) + itask = self._get_main_task_by_id(id_) if itask is None: LOG.warning("%(id)s: task not found, skip" % {"id": id_}) return itask.poll_timer = TaskActionTimer( ctx, delays, num, delay, timeout) elif ctx_key[0] == "try_timers": - itask = self._get_task_by_id(id_) + itask = self._get_main_task_by_id(id_) if itask is None: LOG.warning("%(id)s: task not found, skip" % {"id": id_}) return @@ -598,6 +529,32 @@ def load_db_tasks_to_hold(self) -> None: self.workflow_db_mgr.pri_dao.select_tasks_to_hold() ) + def spawn_successor(self, itask): + """Spawn next-cycle instance of itask if parentless. + + This includes: + - tasks with no parents at the next point + - tasks with all parents before the workflow start point + - absolute-triggered tasks (after the first instance is spawned) + """ + next_point = itask.next_point() + if next_point is not None: + parent_points = itask.tdef.get_parent_points(next_point) + if ( + not parent_points + or all(x < self.config.start_point for x in parent_points) + or itask.tdef.get_abs_triggers(next_point) + ): + taskid = TaskID.get(itask.tdef.name, next_point) + next_task = ( + self._get_hidden_task_by_id(taskid) + or self._get_main_task_by_id(taskid) + or self.spawn_task( + itask.tdef.name, next_point, itask.flow_nums) + ) + if next_task: + self.add_to_pool(next_task) + def release_runahead_task( self, itask: TaskProxy, @@ -609,11 +566,9 @@ def release_runahead_task( - no parents to do it - has absolute triggers (these are satisfied already by definition) """ - if itask.state.reset(is_runahead=False): + if itask.state_reset(is_runahead=False): self.data_store_mgr.delta_task_runahead(itask) - LOG.info("[%s] -released from runahead", itask) - # Queue if ready to run if all(itask.is_ready_to_run()): # (otherwise waiting on xtriggers etc.) @@ -625,53 +580,24 @@ def release_runahead_task( if not runahead_limit_point: return - # Autospawn successor of itask if parentless. - n_task = self.spawn_successor(itask) - if n_task and n_task.point <= runahead_limit_point: - self.release_runahead_task(n_task, runahead_limit_point) - - def spawn_successor(self, itask): - """Spawn itask's successor (same task at next point) if parentless. 
- - This includes: - - tasks with no parents at the next point - - tasks with all parents before the workflow start point - - absolute-triggered tasks (after the first instance is spawned) - """ if itask.tdef.sequential: # implicit prev-instance parent - return None - - if not itask.reflow: - return None - - next_point = itask.next_point() - if next_point is not None: - parent_points = itask.tdef.get_parent_points(next_point) - n_task = None - if ( - ( - not parent_points - or all(x < self.config.start_point for x in parent_points) - ) - or itask.tdef.has_only_abs_triggers(next_point) - ): - n_task = self.get_or_spawn_task( - itask.tdef.name, next_point, - flow_label=itask.flow_label, - parent_id=itask.identity) + return - if n_task is not None: - self.add_to_pool(n_task) - return n_task + if not itask.flow_nums: + # No reflow + return - return None + # Autospawn successor of itask if parentless. + n_task = self.spawn_successor(itask) + if n_task and n_task.point <= runahead_limit_point: + self.release_runahead_task(n_task, runahead_limit_point) def remove(self, itask, reason=""): """Remove a task from the pool (e.g. after a reload).""" msg = "task proxy removed" if reason: - msg += " (%s)" % reason + msg += f" ({reason})" if reason == self.__class__.SUICIDE_MSG: log = LOG.critical @@ -687,7 +613,7 @@ def remove(self, itask, reason=""): self.hidden_pool_changed = True if not self.hidden_pool[itask.point]: del self.hidden_pool[itask.point] - log(f"[{itask}] -{msg}") + log(f"[{itask}] {msg}") return try: @@ -708,7 +634,7 @@ def remove(self, itask, reason=""): # Event-driven final update of task_states table. # TODO: same for datastore (still updated by scheduler loop) self.workflow_db_mgr.put_update_task_state(itask) - log(f"[{itask}] -{msg}") + log(f"[{itask}] {msg}") del itask def get_all_tasks(self) -> List[TaskProxy]: @@ -753,7 +679,7 @@ def _get_hidden_task_by_id(self, id_): with suppress(KeyError): return itask_ids[id_] - def _get_task_by_id(self, id_): + def _get_main_task_by_id(self, id_): """Return main pool task by ID if it exists, or None.""" for itask_ids in list(self.main_pool.values()): with suppress(KeyError): @@ -761,7 +687,7 @@ def _get_task_by_id(self, id_): def queue_task(self, itask: TaskProxy) -> None: """Queue a task that is ready to run.""" - if itask.state.reset(is_queued=True): + if itask.state_reset(is_queued=True): self.data_store_mgr.delta_task_queued(itask) self.task_queue_mgr.push_task(itask) @@ -778,7 +704,7 @@ def release_queued_tasks(self): ) ) for itask in released: - itask.state.reset(is_queued=False) + itask.state_reset(is_queued=False) itask.waiting_on_job_prep = True self.data_store_mgr.delta_task_queued(itask) LOG.info(f"Queue released: {itask.identity}") @@ -869,21 +795,21 @@ def reload_taskdefs(self) -> None: else: # Keep active orphaned task, but stop it from spawning. 
itask.graph_children = {}
-                LOG.warning("[%s] -will not spawn children"
-                            " (task definition removed)", itask)
+                LOG.warning(
+                    f"[{itask}] will not spawn children "
+                    "- task definition removed"
+                )
             else:
                 new_task = TaskProxy(
                     self.config.get_taskdef(itask.tdef.name),
-                    itask.point,
-                    itask.flow_label, itask.state.status)
+                    itask.point, itask.flow_nums, itask.state.status)
                 itask.copy_to_reload_successor(new_task)
                 self._swap_out(new_task)
-                LOG.info('[%s] -reloaded task definition', itask)
+                LOG.info(f"[{itask}] reloaded task definition")
                 if itask.state(*TASK_STATUSES_ACTIVE):
                     LOG.warning(
-                        "[%s] -job(%02d) active with pre-reload settings",
-                        itask,
-                        itask.submit_num)
+                        f"[{itask}] active with pre-reload settings"
+                    )
 
         # Reassign live tasks to the internal queue
         del self.task_queue_mgr
@@ -929,10 +855,10 @@ def set_stop_point(self, stop_point):
                 )
             ):
                 LOG.warning(
-                    "[%s] -not running (beyond workflow stop cycle) %s",
-                    itask,
-                    self.stop_point)
-                if itask.state.reset(is_held=True):
+                    f"[{itask}] not running (beyond workflow stop cycle) "
+                    f"{self.stop_point}"
+                )
+                if itask.state_reset(is_held=True):
                     self.data_store_mgr.delta_task_held(itask)
         return self.stop_point
 
@@ -1073,13 +999,13 @@ def is_stalled(self) -> bool:
         return False
 
     def hold_active_task(self, itask: TaskProxy) -> None:
-        if itask.state.reset(is_held=True):
+        if itask.state_reset(is_held=True):
             self.data_store_mgr.delta_task_held(itask)
         self.tasks_to_hold.add((itask.tdef.name, itask.point))
         self.workflow_db_mgr.put_tasks_to_hold(self.tasks_to_hold)
 
     def release_held_active_task(self, itask: TaskProxy) -> None:
-        if itask.state.reset(is_held=False):
+        if itask.state_reset(is_held=False):
             self.data_store_mgr.delta_task_held(itask)
         if (not itask.state.is_runahead) and all(itask.is_ready_to_run()):
             self.queue_task(itask)
@@ -1193,7 +1119,7 @@ def spawn_on_output(self, itask, output):
         """Spawn and update itask's children, remove itask if finished.
 
         Also set a the abort-on-task-failed flag if necessary.
-        If not itask.reflow update existing children but don't spawn them.
+        If not reflow update existing children but don't spawn them.
 
         If an absolute output is completed update the store of completed abs
         outputs, and update the prerequisites of every instance of the child
@@ -1217,18 +1143,38 @@ def spawn_on_output(self, itask, output):
         suicide = []
         for c_name, c_point, is_abs in children:
             if is_abs:
-                self.abs_outputs_done.add((itask.tdef.name,
-                                           str(itask.point), output))
+                self.abs_outputs_done.add(
+                    (itask.tdef.name, str(itask.point), output))
                 self.workflow_db_mgr.put_insert_abs_output(
                     str(itask.point), itask.tdef.name, output)
                 self.workflow_db_mgr.process_queued_ops()
-            if itask.reflow:
-                c_task = self.get_or_spawn_task(
-                    c_name, c_point, flow_label=itask.flow_label,
-                    parent_id=itask.identity)
-            else:
-                # Don't spawn, but update existing children.
-                c_task = self.get_task(c_name, c_point)
+
+            c_taskid = TaskID.get(c_name, c_point)
+            c_task = (
+                self._get_hidden_task_by_id(c_taskid)
+                or self._get_main_task_by_id(c_taskid)
+            )
+            if c_task is not None:
+                # Child already spawned, update it.
+                c_task.merge_flows(itask.flow_nums)
+                LOG.info(
+                    f"[{c_task}] Merged in flow(s) "
+                    f"{','.join(str(f) for f in itask.flow_nums)}"
+                )
+                self.workflow_db_mgr.put_insert_task_states(
+                    c_task,
+                    {
+                        "status": c_task.state.status,
+                        "flow_nums": json.dumps(list(c_task.flow_nums))
+                    }
+                )
+                # self.workflow_db_mgr.process_queued_ops()
+
+            elif itask.flow_nums:
+                # Spawn child only if itask.flow_nums is not empty.
+                c_task = self.spawn_task(
+                    c_name, c_point, itask.flow_nums,
+                )
 
             if c_task is not None:
                 # Update downstream prerequisites directly.
@@ -1260,7 +1206,7 @@ def spawn_on_output(self, itask, output):
                     TASK_STATUS_SUBMITTED,
                     TASK_STATUS_RUNNING,
                     is_held=False):
-                LOG.warning(f'[{c_task}] -suiciding while active')
+                LOG.warning(f"[{c_task}] suiciding while active")
             self.remove(c_task, self.__class__.SUICIDE_MSG)
 
         # Remove the parent task if finished and complete.
@@ -1296,65 +1242,24 @@ def spawn_on_all_outputs(self, itask):
             continue
 
         for c_name, c_point, _ in children:
-            c_task = self.get_or_spawn_task(
-                c_name, c_point,
-                flow_label=itask.flow_label,
-                parent_id=itask.identity
+            c_taskid = TaskID.get(c_name, c_point)
+            c_task = (
+                self._get_hidden_task_by_id(c_taskid)
+                or self._get_main_task_by_id(c_taskid)
             )
             if c_task is not None:
-                # Add child to the task pool if not already there.
+                # already spawned
+                continue
+            # Spawn child only if itask.flow_nums is not empty.
+            c_task = self.spawn_task(c_name, c_point, itask.flow_nums)
+            if c_task is not None:
                 self.add_to_pool(c_task)
 
-    def get_or_spawn_task(
-        self, name, point, flow_label=None, reflow=True, parent_id=None
-    ):
-        """Return existing or spawned task, or None."""
-        return (self.get_task(name, point, flow_label)
-                or self.spawn_task(name, point, flow_label, reflow, parent_id))
-
-    def _merge_flow_labels(self, itask, flab2):
-        """Merge flab2 into itask's flow label and update DB."""
-
-        # TODO can we do a more minimal (flow-label only) update of the
-        # existing row? (flow label is a primary key so need new insert).
-        # ? self.workflow_db_mgr.put_update_task_state(itask)
-
-        if flab2 is None or flab2 == itask.flow_label:
-            return
-        itask.flow_label = self.flow_label_mgr.merge_labels(
-            itask.flow_label, flab2)
-        self.workflow_db_mgr.put_insert_task_states(itask, {
-            "status": itask.state.status,
-            "flow_label": itask.flow_label})
-        self.workflow_db_mgr.process_queued_ops()  # TODO is this needed here?
-        LOG.info('%s merged flow(%s)', itask.identity, itask.flow_label)
-
-    def get_task_main(self, name, point, flow_label=None):
-        """Return task proxy from main pool and merge flow label if found."""
-        itask = self._get_task_by_id(TaskID.get(name, point))
-        if itask is not None and flow_label is not None:
-            self._merge_flow_labels(itask, flow_label)
-        return itask
-
-    def get_task(self, name, point, flow_label=None):
-        """Return existing task proxy and merge flow label if found."""
-        itask = (
-            self._get_hidden_task_by_id(TaskID.get(name, point))
-            or self._get_task_by_id(TaskID.get(name, point))
-        )
-        if itask is None:
-            LOG.debug('Task %s.%s not found in task pool.', name, point)
-            return None
-        self._merge_flow_labels(itask, flow_label)
-        return itask
-
     def can_spawn(self, name: str, point: 'PointBase') -> bool:
         """Return True if name.point is within various workflow limits."""
-
         if name not in self.config.get_task_name_list():
             LOG.debug('No task definition %s', name)
             return False
-
         # Don't spawn outside of graph limits.
         # TODO: is it possible for initial_point to not be defined?? 
# (see also the similar check + log message in scheduler.py) @@ -1369,23 +1274,19 @@ def can_spawn(self, name: str, point: 'PointBase') -> bool: LOG.debug( 'Not spawning %s.%s: beyond final cycle point', name, point) return False - return True def spawn_task( self, name: str, point: 'PointBase', - flow_label: Optional[str] = None, - reflow: bool = True, - parent_id: Optional[str] = None + flow_nums: Set[int], ) -> Optional[TaskProxy]: - """Spawn name.point and add to runahead pool. Return it, or None.""" - + """Spawn name.point. Return the spawned task, or None.""" if not self.can_spawn(name, point): return None - # Get submit number by flow label {flow_label: submit_num, ...} + # Get submit number by flow_nums {flow_nums: submit_num, ...} snums = self.workflow_db_mgr.pri_dao.select_submit_nums( name, str(point) ) @@ -1396,12 +1297,12 @@ def spawn_task( submit_num = 0 for f_id in snums.keys(): - # Flow labels of previous instances. E.g. f_id "u". - if self.flow_label_mgr.match_labels(flow_label, f_id): - # Already spawned in this flow. E.g. flow_label "uV". - # TODO update existing DB row to avoid cond reflow from V too? - LOG.warning('Not spawning %s.%s (spawned in flow %s)', - name, point, f_id) + # Flow_nums of previous instances. + if set.intersection(flow_nums, set(json.loads(f_id))): + # To avoid "conditional reflow" with (e.g.) "foo | bar => baz". + LOG.warning( + f"Task {name}.{point} already spawned in {flow_nums}" + ) return None # Spawn if on-sequence and within recurrence bounds. @@ -1409,17 +1310,15 @@ def spawn_task( if not taskdef.is_valid_point(point): return None - itask = TaskProxy( - taskdef, point, flow_label, submit_num=submit_num, reflow=reflow - ) + itask = TaskProxy(taskdef, point, flow_nums, submit_num=submit_num) if (name, point) in self.tasks_to_hold: - LOG.info(f"[{itask}] -holding (as requested earlier)") + LOG.info(f"[{itask}] holding (as requested earlier)") self.hold_active_task(itask) elif self.hold_point and itask.point > self.hold_point: # Hold if beyond the workflow hold point LOG.info( - f"[{itask}] -holding (beyond " - f"workflow hold point: {self.hold_point})" + f"[{itask}] holding (beyond workflow " + f"hold point: {self.hold_point})" ) self.hold_active_task(itask) @@ -1431,8 +1330,8 @@ def spawn_task( break if future_trigger_overrun: LOG.warning( - f"[{itask}] -won't run: depends on a " - "task beyond the stop point" + f"[{itask}] won't run: depends on a task beyond " + f"the stop point ({self.stop_point})" ) # Attempt to satisfy any absolute triggers. 
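
[editor note, not part of the patch] On the spawn guard earlier in this
hunk: flow sets are stored in the task_states table as JSON-serialised lists
(part of the primary key), so the re-spawn check must deserialise each
stored set and test for intersection. A minimal sketch (illustrative only;
db_rows stands in for the select_submit_nums result shown in the rundb.py
hunks above):

    import json

    def already_spawned(flow_nums: set, db_rows: dict) -> bool:
        # db_rows maps JSON flow lists to submit numbers,
        # e.g. {"[1]": 2, "[2]": 1}.
        return any(
            flow_nums & set(json.loads(f_id)) for f_id in db_rows
        )

    rows = {json.dumps([1]): 2, json.dumps([2]): 1}
    assert already_spawned({1, 3}, rows)   # flow 1 already spawned the task
    assert not already_spawned({3}, rows)  # flow 3 has not
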
@@ -1440,15 +1339,7 @@ def spawn_task( if itask.state.prerequisites_are_not_all_satisfied(): itask.state.satisfy_me(self.abs_outputs_done) - if parent_id is not None: - msg = "(" + parent_id + ") spawned %s.%s flow(%s)" - else: - msg = "(no parent) spawned %s.%s %s" - if flow_label is None: - # Manual trigger: new flow - msg += " (new flow)" - - LOG.info(msg, name, point, flow_label) + LOG.info(f"[{itask}] spawned") return itask def match_taskdefs( @@ -1492,17 +1383,16 @@ def match_taskdefs( continue return n_warnings, task_items - def force_spawn_children(self, items, outputs): + def force_spawn_children(self, items, outputs, flow_num): """Spawn downstream children of given task outputs on user command.""" n_warnings, task_items = self.match_taskdefs(items) for (_, point), taskdef in sorted(task_items.items()): # This the upstream target task: - itask = TaskProxy(taskdef, point, - self.flow_label_mgr.get_new_label()) + itask = TaskProxy(taskdef, point, flow_nums={flow_num}) # Spawn downstream on selected outputs. for trig, out, _ in itask.state.outputs.get_all(): if trig in outputs: - LOG.info('Forced spawning on %s:%s', itask.identity, out) + LOG.info(f"[{itask}] Forced spawning on {out}") self.spawn_on_output(itask, out) def remove_tasks(self, items): @@ -1513,42 +1403,66 @@ def remove_tasks(self, items): return len(bad_items) def force_trigger_tasks( - self, items: Iterable[str], reflow: bool = False + self, items: Iterable[str], + reflow: bool = False, + flow_descr: str = "no description" ) -> int: """Trigger matching tasks, with or without reflow. - Don't get a new flow label for existing task proxies (e.g. incomplete + Don't get a new flow number for existing task proxies (e.g. incomplete tasks). These can flow on in the original flow if retriggered. - Otherwise generate a new flow label for a new task proxy, with or + Otherwise generate a new flow number for a new task proxy, with or without reflow. Queue the task if not queued, otherwise release it to run. """ n_warnings, task_items = self.match_taskdefs(items) for name, point in task_items.keys(): - itask = self.get_task_main(name, point) - if itask is not None: - # Already in pool: trigger and merge flow labels. + task_id = TaskID.get(name, point) + itask = ( + self._get_main_task_by_id(task_id) + or self._get_hidden_task_by_id(task_id) + ) + if itask is None: + # Spawn with new flow number, unless no reflow. + if reflow: + flow_nums = {self.flow_mgr.get_new_flow(flow_descr)} + else: + flow_nums = set() + itask = self.spawn_task(name, point, flow_nums) + if itask is None: + continue + itask.is_manual_submit = True + # This will queue the task. + self.add_to_pool(itask, is_new=True) + else: + # In pool already + if itask.state(*TASK_STATUSES_ACTIVE): + LOG.warning(f"[{itask}] ignoring trigger - already active") + continue itask.is_manual_submit = True itask.reset_try_timers() # (If None, spawner reports cycle bounds errors). - if itask.state.reset(TASK_STATUS_WAITING): + if itask.state_reset(TASK_STATUS_WAITING): + # (could also be unhandled failed) self.data_store_mgr.delta_task_state(itask) # (No need to set prerequisites satisfied here). if not itask.state.is_queued: - LOG.info(f"Force-trigger: queueing {itask.identity}") self.queue_task(itask) + LOG.info( + f"[{itask}] queued, trigger again to submit now." + ) else: self.task_queue_mgr.force_release_task(itask) - else: - # Spawn with new flow label. 
- flow_label = self.flow_label_mgr.get_new_label() - itask = self.spawn_task( - name, point, flow_label, reflow=reflow) - itask.is_manual_submit = True - # This will queue the task. - self.add_to_pool(itask, is_new=True) + + self.workflow_db_mgr.put_insert_task_states( + itask, + { + "status": itask.state.status, + "flow_nums": json.dumps(list(itask.flow_nums)) + } + ) return n_warnings def sim_time_check(self, message_queue): @@ -1610,11 +1524,11 @@ def _set_expired_task(self, itask): itask.get_offset_as_seconds(itask.tdef.expiration_offset)) if time() > itask.expire_time: msg = 'Task expired (skipping job).' - LOG.warning('[%s] -%s', itask, msg) + LOG.warning(f"[{itask}] {msg}") self.task_events_mgr.setup_event_handlers(itask, "expired", msg) # TODO succeeded and expired states are useless due to immediate # removal under all circumstances (unhandled failed is still used). - if itask.state.reset(TASK_STATUS_EXPIRED, is_held=False): + if itask.state_reset(TASK_STATUS_EXPIRED, is_held=False): self.data_store_mgr.delta_task_state(itask) self.data_store_mgr.delta_task_held(itask) self.remove(itask, 'expired') @@ -1665,43 +1579,11 @@ def filter_task_proxies( bad_items.append(item) return itasks, bad_items - def stop_flow(self, flow_label): - """Stop a particular flow from spawning any further.""" - # Stop tasks belong to flow_label from continuing. + def stop_flow(self, flow_num): + """Stop a particular flow_num from spawning any further.""" for itask in self.get_all_tasks(): - # Don't use match_label(); we don't want to stop merged flows. - if itask.flow_label == flow_label: - itask.reflow = False - - def prune_flow_labels(self): - """Remove redundant flow labels. - - Note this iterates the task pool twice but it can be called - infrequently and doesn't do anything if there is only one flow. - - """ - if self.flow_label_mgr.get_num_inuse() == 1: - # Nothing to do. - return - # Gather all current labels. - labels = [itask.flow_label for itask in self.get_all_tasks()] - if not labels: - return - # Find any labels common to all tasks. - common = self.flow_label_mgr.get_common_labels(labels) - # And prune them back to just one. - num = len(list(common)) - if num <= 1: - return - LOG.debug('Pruning redundant flow labels: %s', common) - to_prune = [] - while num > 1: - to_prune.append(common.pop()) - num -= 1 - for itask in self.get_all_tasks(): - itask.flow_label = self.flow_label_mgr.unmerge_labels( - to_prune, itask.flow_label) - self.flow_label_mgr.make_avail(to_prune) + with suppress(KeyError): + itask.flow_nums.remove(flow_num) def log_task_pool(self, log_lvl=logging.DEBUG): """Log content of task and prerequisite pools in debug mode.""" diff --git a/cylc/flow/task_proxy.py b/cylc/flow/task_proxy.py index 5b7fcf0e67c..74d6ff5da6c 100644 --- a/cylc/flow/task_proxy.py +++ b/cylc/flow/task_proxy.py @@ -20,11 +20,12 @@ from contextlib import suppress from fnmatch import fnmatchcase from time import time -from typing import Any, Dict, List, Tuple, Optional, TYPE_CHECKING +from typing import Any, Dict, List, Set, Tuple, Optional, TYPE_CHECKING from metomi.isodatetime.timezone import get_local_time_zone import cylc.flow.cycling.iso8601 +from cylc.flow import LOG from cylc.flow.cycling.loader import standardise_point_string from cylc.flow.exceptions import PointParsingError from cylc.flow.platforms import get_platform @@ -125,10 +126,8 @@ class TaskProxy: objects. 
.graph_children (dict)
             graph children: {msg: [(name, point), ...]}
-        .flow_label:
-            flow label
-        .reflow:
-            flow on from outputs
+        .flow_nums:
+            the set of flows this task proxy belongs to
         .waiting_on_job_prep:
             task waiting on job prep

     Args:
         tdef: The definition object of this task.
         start_point: Start point to calculate the task's cycle point on
             start-up or the cycle point for subsequent tasks.
-        flow_label: Which flow within the scheduler this task belongs to.
+        flow_nums: Which flows within the scheduler this task belongs to.
         status: Task state string.
         is_held: True if the task is held, else False.
         submit_num: Number of times the task has attempted job submission.
         is_late: Is the task late?
-        reflow: Flow on from outputs. TODO: better description for arg?

     """

     # Memory optimization - constrain possible attributes to this list.
@@ -168,8 +166,7 @@ class TaskProxy:
         'timeout',
         'try_timers',
         'graph_children',
-        'flow_label',
-        'reflow',
+        'flow_nums',
         'waiting_on_job_prep',
     ]

@@ -177,12 +174,11 @@ def __init__(
         self,
         tdef: 'TaskDef',
         start_point: 'PointBase',
-        flow_label: Optional[str],
+        flow_nums: Optional[Set[int]] = None,
         status: str = TASK_STATUS_WAITING,
         is_held: bool = False,
         submit_num: int = 0,
         is_late: bool = False,
-        reflow: bool = True
     ) -> None:

         self.tdef = tdef
@@ -190,8 +186,10 @@ def __init__(
             submit_num = 0
         self.submit_num = submit_num
         self.jobs: List[str] = []
-        self.flow_label = flow_label
-        self.reflow = reflow
+        if flow_nums is None:
+            self.flow_nums = set()
+        else:
+            self.flow_nums = flow_nums
         self.point = start_point
         self.identity: str = TaskID.get(self.tdef.name, self.point)

@@ -211,7 +209,7 @@ def __init__(
             'execution_time_limit': None,
             'job_runner_name': None,
             'submit_method_id': None,
-            'flow_label': None
+            'flow_nums': set()
         }

         self.local_job_file_path: Optional[str] = None
@@ -239,8 +237,18 @@ def __repr__(self) -> str:
         return f"<{self.__class__.__name__} '{self.identity}'>"

     def __str__(self) -> str:
-        """Stringify using "self.identity"."""
-        return self.identity
+        """Stringify using identity, state, submit_num, and flow_nums.
+
+        Omit flow_nums if the set is empty (the task belongs to no flow).
+        """
+        res = (
+            f"{self.identity} "
+            f"{self.state} "
+            f"job:{self.submit_num:02d}"
+        )
+        if self.flow_nums:
+            res += f" flows:{','.join(str(i) for i in self.flow_nums)}"
+        return res

     def copy_to_reload_successor(self, reload_successor):
         """Copy attributes to successor on reload of this task proxy."""
@@ -402,3 +410,17 @@ def name_match(self, name: str) -> bool:
         return any(
             fnmatchcase(ns, name) for ns in self.tdef.namespace_hierarchy
         )
+
+    def merge_flows(self, flow_nums: Set) -> None:
+        """Merge another set of flow_nums with mine."""
+        self.flow_nums.update(flow_nums)
+
+    def state_reset(
+        self, status=None, is_held=None, is_queued=None, is_runahead=None
+    ) -> bool:
+        """Set new state and log the change.
Return whether it changed.""" + before = str(self) + if self.state.reset(status, is_held, is_queued, is_runahead): + LOG.info(f"[{before}] => {self.state}") + return True + return False diff --git a/cylc/flow/task_state.py b/cylc/flow/task_state.py index 4a73e166e3e..deca92c7a7d 100644 --- a/cylc/flow/task_state.py +++ b/cylc/flow/task_state.py @@ -17,9 +17,7 @@ """Task state related logic.""" -from cylc.flow import LOG from cylc.flow.prerequisite import Prerequisite -from cylc.flow.task_id import TaskID from cylc.flow.task_outputs import ( TaskOutputs, TASK_OUTPUT_EXPIRED, TASK_OUTPUT_SUBMITTED, TASK_OUTPUT_SUBMIT_FAILED, @@ -179,8 +177,6 @@ class TaskState: True if the task is queued else False. .is_runahead (bool): True if the task is runahead limited else False. - .identity (str): - The task ID as `TASK.CYCLE` associated with this object. .is_updated (boolean): Has the status been updated since previous update? .kill_failed (boolean): @@ -209,7 +205,6 @@ class TaskState: "is_held", "is_queued", "is_runahead", - "identity", "is_updated", "kill_failed", "outputs", @@ -223,7 +218,6 @@ class TaskState: ] def __init__(self, tdef, point, status, is_held): - self.identity = TaskID.get(tdef.name, str(point)) self.status = status self.is_held = is_held self.is_queued = False @@ -257,14 +251,14 @@ def __init__(self, tdef, point, status, is_held): self.kill_failed = False def __str__(self): - """Print status (is_held) (is_queued) (is_runahead).""" + """Print status(is_held)(is_queued)(is_runahead).""" ret = self.status if self.is_held: - ret += ' (held)' + ret += '(held)' if self.is_queued: - ret += ' (queued)' + ret += '(queued)' if self.is_runahead: - ret += ' (runahead)' + ret += '(runahead)' return ret def __call__( @@ -405,7 +399,7 @@ def reset( unchanged. 
Returns:
-            bool: True if state change, else False
+            whether the state changed or not (bool)

         """
         current_status = (
@@ -424,8 +418,6 @@ def reset(
             # no change - do nothing
             return False

-        prev_message = str(self)
-
         # perform the actual state change
         self.status, self.is_held, self.is_queued, self.is_runahead = (
             requested_status
@@ -433,7 +425,6 @@ def reset(
         self.time_updated = get_current_time_string()
         self.is_updated = True
-        LOG.debug("[%s] -%s => %s", self.identity, prev_message, str(self))

         if is_held:
             # only reset task outputs if not setting task to held
diff --git a/cylc/flow/workflow_db_mgr.py b/cylc/flow/workflow_db_mgr.py
index f8a9c1eac57..f1a3b9134fe 100644
--- a/cylc/flow/workflow_db_mgr.py
+++ b/cylc/flow/workflow_db_mgr.py
@@ -73,11 +73,13 @@ class WorkflowDatabaseManager:
     KEY_CYCLE_POINT_FORMAT = 'cycle_point_format'
     KEY_CYCLE_POINT_TIME_ZONE = 'cycle_point_tz'
     KEY_RESTART_COUNT = 'n_restart'
+    KEY_FLOW_COUNTER = "flow_counter"

     TABLE_BROADCAST_EVENTS = CylcWorkflowDAO.TABLE_BROADCAST_EVENTS
     TABLE_BROADCAST_STATES = CylcWorkflowDAO.TABLE_BROADCAST_STATES
     TABLE_INHERITANCE = CylcWorkflowDAO.TABLE_INHERITANCE
     TABLE_WORKFLOW_PARAMS = CylcWorkflowDAO.TABLE_WORKFLOW_PARAMS
+    TABLE_WORKFLOW_FLOWS = CylcWorkflowDAO.TABLE_WORKFLOW_FLOWS
     TABLE_WORKFLOW_TEMPLATE_VARS = CylcWorkflowDAO.TABLE_WORKFLOW_TEMPLATE_VARS
     TABLE_TASK_ACTION_TIMERS = CylcWorkflowDAO.TABLE_TASK_ACTION_TIMERS
     TABLE_TASK_POOL = CylcWorkflowDAO.TABLE_TASK_POOL
@@ -116,6 +118,7 @@ def __init__(self, pri_d=None, pub_d=None):
             self.TABLE_BROADCAST_STATES: [],
             self.TABLE_INHERITANCE: [],
             self.TABLE_WORKFLOW_PARAMS: [],
+            self.TABLE_WORKFLOW_FLOWS: [],
             self.TABLE_WORKFLOW_TEMPLATE_VARS: [],
             self.TABLE_TASK_POOL: [],
             self.TABLE_TASK_ACTION_TIMERS: [],
@@ -424,7 +427,7 @@ def put_update_task_state(self, itask):
         where_args = {
             "cycle": str(itask.point),
             "name": itask.tdef.name,
-            "flow_label": itask.flow_label,
+            "flow_nums": json.dumps(list(itask.flow_nums)),
             "submit_num": itask.submit_num,
         }
         self.db_updates_map.setdefault(self.TABLE_TASK_STATES, [])
@@ -456,7 +459,7 @@ def put_task_pool(self, pool: 'TaskPool') -> None:
             self.db_inserts_map[self.TABLE_TASK_POOL].append({
                 "name": itask.tdef.name,
                 "cycle": str(itask.point),
-                "flow_label": itask.flow_label,
+                "flow_nums": json.dumps(list(itask.flow_nums)),
                 "status": itask.state.status,
                 "is_held": itask.state.is_held
             })
@@ -500,7 +503,7 @@ def put_task_pool(self, pool: 'TaskPool') -> None:
                 where_args = {
                     "cycle": str(itask.point),
                     "name": itask.tdef.name,
-                    "flow_label": itask.flow_label
+                    "flow_nums": json.dumps(list(itask.flow_nums))
                 }
                 self.db_updates_map.setdefault(self.TABLE_TASK_STATES, [])
                 self.db_updates_map[self.TABLE_TASK_STATES].append(
@@ -558,6 +561,19 @@ def put_insert_abs_output(self, cycle, name, output):
         self.db_inserts_map.setdefault(CylcWorkflowDAO.TABLE_ABS_OUTPUTS, [])
         self.db_inserts_map[CylcWorkflowDAO.TABLE_ABS_OUTPUTS].append(args)

+    def put_insert_workflow_flows(self, flow_num, flow_metadata):
+        """Put INSERT statement for a new flow."""
+        self.db_inserts_map.setdefault(
+            CylcWorkflowDAO.TABLE_WORKFLOW_FLOWS, []
+        )
+        self.db_inserts_map[CylcWorkflowDAO.TABLE_WORKFLOW_FLOWS].append(
+            {
+                "flow_num": flow_num,
+                "start_time": flow_metadata["start_time"],
+                "description": flow_metadata["description"],
+            }
+        )
+
     def _put_insert_task_x(self, table_name, itask, args):
         """Put INSERT statement for a task_* table."""
         args.update({
@@ -589,8 +605,8 @@ def _put_update_task_x(self, table_name, itask, set_args):
             "name": itask.tdef.name}
         if "submit_num" not in
set_args: where_args["submit_num"] = itask.submit_num - if "flow_label" not in set_args: - where_args["flow_label"] = itask.flow_label + if "flow_nums" not in set_args: + where_args["flow_nums"] = json.dumps(list(itask.flow_nums)) self.db_updates_map.setdefault(table_name, []) self.db_updates_map[table_name].append((set_args, where_args)) diff --git a/cylc/flow/workflow_files.py b/cylc/flow/workflow_files.py index fd89da4ffb2..181ffd88531 100644 --- a/cylc/flow/workflow_files.py +++ b/cylc/flow/workflow_files.py @@ -640,7 +640,7 @@ async def load_contact_file_async(reg, run_dir=None): def register( - flow_name: str, source: Optional[str] = None + workflow_name: str, source: Optional[str] = None ) -> str: """Set up workflow. This completes some of the set up completed by cylc install. @@ -653,7 +653,7 @@ def register( Creates the .service directory. Args: - flow_name: workflow name. + workflow_name: workflow name. source: directory location of flow.cylc file, default $PWD. Return: @@ -665,7 +665,7 @@ def register( - Illegal name (can look like a relative path, but not absolute). - Nested workflow run directories. """ - validate_workflow_name(flow_name) + validate_workflow_name(workflow_name) if source is not None: if os.path.basename(source) == WorkflowFiles.FLOW_FILE: source = os.path.dirname(source) @@ -674,16 +674,16 @@ def register( # flow.cylc must exist so we can detect accidentally reversed args. source = os.path.abspath(source) check_flow_file(source, symlink_suiterc=True, logger=None) - if not is_installed(get_workflow_run_dir(flow_name)): + if not is_installed(get_workflow_run_dir(workflow_name)): symlinks_created = make_localhost_symlinks( - get_workflow_run_dir(flow_name), flow_name) + get_workflow_run_dir(workflow_name), workflow_name) if symlinks_created: for src, dst in symlinks_created.items(): LOG.info(f"Symlink created from {src} to {dst}") # Create service dir if necessary. - srv_d = get_workflow_srv_dir(flow_name) + srv_d = get_workflow_srv_dir(workflow_name) os.makedirs(srv_d, exist_ok=True) - return flow_name + return workflow_name def is_installed(rund: Union[Path, str]) -> bool: @@ -1535,7 +1535,7 @@ def reinstall_workflow(named_run, rundir, source, dry_run=False): def install_workflow( - flow_name: Optional[str] = None, + workflow_name: Optional[str] = None, source: Optional[Union[Path, str]] = None, run_name: Optional[str] = None, no_run_name: bool = False, @@ -1548,19 +1548,19 @@ def install_workflow( work, log, share, share/cycle directories. Args: - flow_name: workflow name, default basename($PWD). + workflow_name: workflow name, default basename($PWD). source: directory location of flow.cylc file, default $PWD. run_name: name of the run, overrides run1, run2, run 3 etc... If specified, cylc install will not create runN symlink. rundir: for overriding the default cylc-run directory. no_run_name: Flag as True to install workflow into - ~/cylc-run/ + ~/cylc-run/ cli_symlink_dirs: Symlink dirs, if entered on the cli. Return: - source: The source directory. - rundir: The directory the workflow has been installed into. - flow_name: The installed workflow name (which may be computed here). + source: source directory. + rundir: directory the workflow has been installed into. + workflow_name: installed workflow name (which may be computed here). 
Raise: WorkflowFilesError: @@ -1574,17 +1574,17 @@ def install_workflow( elif Path(source).name == WorkflowFiles.FLOW_FILE: source = Path(source).parent source = Path(expand_path(source)) - if not flow_name: - flow_name = source.name - validate_workflow_name(flow_name) + if not workflow_name: + workflow_name = source.name + validate_workflow_name(workflow_name) if run_name in WorkflowFiles.RESERVED_NAMES: raise WorkflowFilesError(f'Run name cannot be "{run_name}".') if run_name is not None and len(Path(run_name).parts) != 1: raise WorkflowFilesError( f'Run name cannot be a path. (You used {run_name})' ) - validate_source_dir(source, flow_name) - run_path_base = Path(get_workflow_run_dir(flow_name)) + validate_source_dir(source, workflow_name) + run_path_base = Path(get_workflow_run_dir(workflow_name)) relink, run_num, rundir = get_run_dir_info( run_path_base, run_name, no_run_name) if Path(rundir).exists(): @@ -1594,7 +1594,7 @@ def install_workflow( " name, using the --run-name option.") check_nested_run_dirs(rundir) symlinks_created = {} - named_run = flow_name + named_run = workflow_name if run_name: named_run = os.path.join(named_run, run_name) elif run_num: @@ -1652,7 +1652,7 @@ def install_workflow( install_log.info(f'INSTALLED {named_run} from {source}') print(f'INSTALLED {named_run} from {source}') close_log(install_log) - return source, rundir, flow_name + return source, rundir, workflow_name def get_run_dir_info( @@ -1701,7 +1701,7 @@ def detect_flow_exists( Args: run_path_base: Absolute path of workflow directory, - i.e ~/cylc-run/ + i.e ~/cylc-run/ numbered: If True, will detect if numbered runs exist. If False, will detect if non-numbered runs exist, i.e. runs installed by --run-name. @@ -1767,7 +1767,7 @@ def create_workflow_srv_dir(rundir=None, source=None): workflow_srv_d.mkdir(exist_ok=True, parents=True) -def validate_source_dir(source, flow_name): +def validate_source_dir(source, workflow_name): """Ensure the source directory is valid. Args: @@ -1782,14 +1782,14 @@ def validate_source_dir(source, flow_name): for dir_ in WorkflowFiles.RESERVED_DIRNAMES: if Path(source, dir_).exists(): raise WorkflowFilesError( - f'{flow_name} installation failed. - {dir_} exists in source ' - 'directory.') + f"{workflow_name} installation failed. " + f"- {dir_} exists in source directory.") cylc_run_dir = Path(get_cylc_run_dir()) if (os.path.abspath(os.path.realpath(cylc_run_dir)) in os.path.abspath(os.path.realpath(source))): raise WorkflowFilesError( - f'{flow_name} installation failed. Source directory should not be ' - f'in {cylc_run_dir}') + f"{workflow_name} installation failed. 
Source directory " + f"should not be in {cylc_run_dir}") check_flow_file(source, logger=None) @@ -1862,7 +1862,7 @@ def link_runN(latest_run: Union[Path, str]): run_n.symlink_to(latest_run.name) -def search_install_source_dirs(flow_name: str) -> Path: +def search_install_source_dirs(workflow_name: str) -> Path: """Return the path of a workflow source dir if it is present in the 'global.cylc[install]source dirs' search path.""" search_path: List[str] = glbl_cfg().get(['install', 'source dirs']) @@ -1872,9 +1872,10 @@ def search_install_source_dirs(flow_name: str) -> Path: "does not contain any paths") for path in search_path: try: - flow_file = check_flow_file(Path(path, flow_name), logger=None) + flow_file = check_flow_file(Path(path, workflow_name), logger=None) return flow_file.parent except WorkflowFilesError: continue raise WorkflowFilesError( - f"Could not find workflow '{flow_name}' in: {', '.join(search_path)}") + f"Could not find workflow '{workflow_name}' in: " + f"{', '.join(search_path)}") diff --git a/setup.cfg b/setup.cfg index 3d0f4f54e14..ad39ecd6b39 100644 --- a/setup.cfg +++ b/setup.cfg @@ -121,7 +121,6 @@ cylc.main_loop = log_data_store = cylc.flow.main_loop.log_data_store log_main_loop = cylc.flow.main_loop.log_main_loop log_memory = cylc.flow.main_loop.log_memory - prune_flow_labels = cylc.flow.main_loop.prune_flow_labels reset_bad_hosts = cylc.flow.main_loop.reset_bad_hosts # NOTE: all entry points should be listed here even if Cylc Flow does not # provide any implementations, to make entry point scraping easier diff --git a/tests/flakyfunctional/cylc-poll/03-poll-all/flow.cylc b/tests/flakyfunctional/cylc-poll/03-poll-all/flow.cylc index 3d74ebbc90e..ca7fcf75f59 100644 --- a/tests/flakyfunctional/cylc-poll/03-poll-all/flow.cylc +++ b/tests/flakyfunctional/cylc-poll/03-poll-all/flow.cylc @@ -38,8 +38,8 @@ trigger, and the workflow to shut down successfully.""" script = """ cylc poll "${CYLC_WORKFLOW_NAME}" -cylc__job__poll_grep_workflow_log -F \ - "[submit_hold.${CYLC_TASK_CYCLE_POINT}] -preparing => submitted" +cylc__job__poll_grep_workflow_log \ + "submit_hold.${CYLC_TASK_CYCLE_POINT} preparing .* => submitted" st_file="${CYLC_WORKFLOW_RUN_DIR}/log/job/${CYLC_TASK_CYCLE_POINT}/submit_hold/NN/job.status" pkill -g "$(awk -F= '$1 == "CYLC_JOB_ID" {print $2}' "${st_file}")" diff --git a/tests/flakyfunctional/cylc-poll/16-execution-time-limit.t b/tests/flakyfunctional/cylc-poll/16-execution-time-limit.t index 7d38027a1b6..7e9a3ef91a5 100755 --- a/tests/flakyfunctional/cylc-poll/16-execution-time-limit.t +++ b/tests/flakyfunctional/cylc-poll/16-execution-time-limit.t @@ -58,16 +58,15 @@ __PYTHON__ LOG="${WORKFLOW_RUN_DIR}/log/workflow/log" # Test logging of the "next job poll" message when task starts. TEST_NAME="${TEST_NAME_BASE}-log-entry" -LINE="$(grep -F '[foo.1] -health check settings: execution timeout=PT10S' "${LOG}")" -run_ok "${TEST_NAME}" grep -q 'health check settings: execution timeout=PT10S' \ - <<< "${LINE}" +LINE="$(grep '\[foo\.1 .* execution timeout=PT10S' "${LOG}")" +run_ok "${TEST_NAME}" grep -q 'health: execution timeout=PT10S' <<< "${LINE}" # Determine poll times. PREDICTED_POLL_TIME=$(time_offset \ "$(cut -d ' ' -f 1 <<< "${LINE}")" \ "$(sed -n 's/^.*execution timeout=\([^,]\+\).*$/\1/p' <<< "${LINE}")") ACTUAL_POLL_TIME=$(sed -n \ - 's/\(.*\) INFO - \[foo.1\] status=running: (polled)failed .*/\1/p' \ - "${LOG}") + 's/\(.*\) INFO - \[foo.1 running .* (polled)failed .*/\1/p' "${LOG}") + # Test execution timeout polling. 
# Main loop is roughly 1 second, but integer rounding may give an apparent 2 # seconds delay, so set threshold as 2 seconds. diff --git a/tests/flakyfunctional/database/00-simple.t b/tests/flakyfunctional/database/00-simple.t index 142308722fb..0854c37bf98 100644 --- a/tests/flakyfunctional/database/00-simple.t +++ b/tests/flakyfunctional/database/00-simple.t @@ -45,6 +45,7 @@ sed -i "s/$(cylc --version)//g" "${NAME}" cmp_ok "${NAME}" << __EOF__ UTC_mode|0 cylc_version| +flow_counter|1 __EOF__ NAME='select-task-events.out' diff --git a/tests/flakyfunctional/database/00-simple/schema.out b/tests/flakyfunctional/database/00-simple/schema.out index d5b9485f175..8d649d01903 100644 --- a/tests/flakyfunctional/database/00-simple/schema.out +++ b/tests/flakyfunctional/database/00-simple/schema.out @@ -8,10 +8,11 @@ CREATE TABLE task_events(name TEXT, cycle TEXT, time TEXT, submit_num INTEGER, e CREATE TABLE task_jobs(cycle TEXT, name TEXT, submit_num INTEGER, is_manual_submit INTEGER, try_num INTEGER, time_submit TEXT, time_submit_exit TEXT, submit_status INTEGER, time_run TEXT, time_run_exit TEXT, run_signal TEXT, run_status INTEGER, platform_name TEXT, job_runner_name TEXT, job_id TEXT, PRIMARY KEY(cycle, name, submit_num)); CREATE TABLE task_late_flags(cycle TEXT, name TEXT, value INTEGER, PRIMARY KEY(cycle, name)); CREATE TABLE task_outputs(cycle TEXT, name TEXT, outputs TEXT, PRIMARY KEY(cycle, name)); -CREATE TABLE task_pool(cycle TEXT, name TEXT, flow_label TEXT, status TEXT, is_held INTEGER, PRIMARY KEY(cycle, name, flow_label)); +CREATE TABLE task_pool(cycle TEXT, name TEXT, flow_nums TEXT, status TEXT, is_held INTEGER, PRIMARY KEY(cycle, name, flow_nums)); CREATE TABLE task_prerequisites(cycle TEXT, name TEXT, prereq_name TEXT, prereq_cycle TEXT, prereq_output TEXT, satisfied TEXT, PRIMARY KEY(cycle, name, prereq_name, prereq_cycle, prereq_output)); -CREATE TABLE task_states(name TEXT, cycle TEXT, flow_label TEXT, time_created TEXT, time_updated TEXT, submit_num INTEGER, status TEXT, PRIMARY KEY(name, cycle, flow_label)); +CREATE TABLE task_states(name TEXT, cycle TEXT, flow_nums TEXT, time_created TEXT, time_updated TEXT, submit_num INTEGER, status TEXT, PRIMARY KEY(name, cycle, flow_nums)); CREATE TABLE task_timeout_timers(cycle TEXT, name TEXT, timeout REAL, PRIMARY KEY(cycle, name)); CREATE TABLE tasks_to_hold(name TEXT, cycle TEXT); +CREATE TABLE workflow_flows(flow_num INTEGER, start_time TEXT, description TEXT, PRIMARY KEY(flow_num)); CREATE TABLE xtriggers(signature TEXT, results TEXT, PRIMARY KEY(signature)); CREATE TABLE absolute_outputs(cycle TEXT, name TEXT, output TEXT); diff --git a/tests/flakyfunctional/events/01-task/events.log b/tests/flakyfunctional/events/01-task/events.log index 3f7375d8dd7..af8a408e18c 100644 --- a/tests/flakyfunctional/events/01-task/events.log +++ b/tests/flakyfunctional/events/01-task/events.log @@ -5,7 +5,7 @@ failed baz.1 job failed retry foo.1 job failed, retrying in PT3S started baz.1 job started submission failed bar.1 job submission failed -submission retry bar.1 job submission failed, submit-retrying in PT3S +submission retry bar.1 job submission failed, retrying in PT3S submission timeout baz.1 submission timeout after PT3S submitted baz.1 job submitted succeeded foo.1 job succeeded diff --git a/tests/flakyfunctional/hold-release/14-hold-kill/flow.cylc b/tests/flakyfunctional/hold-release/14-hold-kill/flow.cylc index beda587632a..7e383e375ba 100644 --- a/tests/flakyfunctional/hold-release/14-hold-kill/flow.cylc +++ 
b/tests/flakyfunctional/hold-release/14-hold-kill/flow.cylc @@ -8,8 +8,8 @@ script = """ echo '# killing "sleeper"' cylc kill "${CYLC_WORKFLOW_NAME}" "sleeper.1" - cylc__job__poll_grep_workflow_log -F \ - '[sleeper.1] -job(01) failed, held' + cylc__job__poll_grep_workflow_log -E \ + 'sleeper\.1 waiting\(held\) .* job killed' sleep 10 # sleep, should still be held after 10 seconds cylc dump -s -t "${CYLC_WORKFLOW_NAME}" >'cylc-dump.out' diff -u 'cylc-dump.out' - <<'__OUT__' diff --git a/tests/flakyfunctional/hold-release/15-hold-after/flow.cylc b/tests/flakyfunctional/hold-release/15-hold-after/flow.cylc index d5332b9ab1e..4d2dd2e74a1 100644 --- a/tests/flakyfunctional/hold-release/15-hold-after/flow.cylc +++ b/tests/flakyfunctional/hold-release/15-hold-after/flow.cylc @@ -23,7 +23,8 @@ script = cylc hold --after '20140101T12' "${CYLC_WORKFLOW_NAME}" [[stopper]] script = """ - cylc__job__poll_grep_workflow_log '\[bar\.20140101T1200Z\].* (received)succeeded' + cylc__job__poll_grep_workflow_log -E \ + 'bar\.20140101T1200Z .* \(received\)succeeded' cylc stop "${CYLC_WORKFLOW_NAME}" """ [[[job]]] diff --git a/tests/functional/authentication/00-shared-fs.t b/tests/functional/authentication/00-shared-fs.t index f9b206741ae..00a033e16e9 100755 --- a/tests/functional/authentication/00-shared-fs.t +++ b/tests/functional/authentication/00-shared-fs.t @@ -41,8 +41,8 @@ WORKFLOW_LOG="${WORKFLOW_RUN_DIR}/log/workflow/log" # Note: double poll existence of workflow log on workflow host and then localhost to # avoid any issues with unstable mounting of the shared file system. poll ssh -oBatchMode=yes -n "${CYLC_TEST_HOST}" test -e "${WORKFLOW_LOG}" -poll_grep_workflow_log -F '[t1.19700101T0000Z] -submitted => running' -poll_grep_workflow_log -F '[t1.19700101T0000Z] -running => failed' +poll_grep_workflow_log -E 't1\.19700101T0000Z submitted .* => running' +poll_grep_workflow_log -E 't1\.19700101T0000Z running .* => failed' run_ok "${TEST_NAME_BASE}-broadcast" \ cylc broadcast -n 't1' -s '[environment]CYLC_TEST_VAR_FOO=foo' "${WORKFLOW_NAME}" diff --git a/tests/functional/broadcast/07-timeout/flow.cylc b/tests/functional/broadcast/07-timeout/flow.cylc index b444066c3e6..46bbc749fe9 100644 --- a/tests/functional/broadcast/07-timeout/flow.cylc +++ b/tests/functional/broadcast/07-timeout/flow.cylc @@ -17,7 +17,8 @@ """ [[timeout]] script = """ -cylc__job__poll_grep_workflow_log -F "[${CYLC_TASK_ID}] -execution timeout after PT1S" -""" + cylc__job__poll_grep_workflow_log -E \ + "${CYLC_TASK_ID} .* execution timeout after PT1S" + """ [[[events]]] execution timeout = PT1M diff --git a/tests/functional/cylc-cat-log/05-remote-tail.t b/tests/functional/cylc-cat-log/05-remote-tail.t index 32f0e54d51d..e918709ee80 100755 --- a/tests/functional/cylc-cat-log/05-remote-tail.t +++ b/tests/functional/cylc-cat-log/05-remote-tail.t @@ -41,7 +41,7 @@ $SCP "${PWD}/bin/my-tailer.sh" \ # Run detached. workflow_run_ok "${TEST_NAME_BASE}-run" cylc play "${WORKFLOW_NAME}" #------------------------------------------------------------------------------- -poll_grep_workflow_log -F '[foo.1] status=submitted' +poll_grep_workflow_log -E 'foo\.1 preparing .* => submitted' # cylc cat-log -m 't' tail-follows a file, so needs to be killed. # Send interrupt signal to tail command after 15 seconds. 
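# NOTE (illustrative sketch, not part of cylc): the tests in this patch
# switch from fixed-string greps (-F) to extended regexps (-E) because the
# task log prefix changes from e.g. "[foo.1] status=running" to the richer
# "[foo.1 running job:01 flows:1]" form produced by the new
# TaskProxy.__str__. A rough pattern for the new prefix, for illustration:
import re

NEW_PREFIX = re.compile(
    r'\[(?P<task>[\w-]+)\.(?P<point>\S+) '  # task name and cycle point
    r'(?P<status>[\w-]+(?:\(\w+\))*) '      # status, e.g. waiting(held)
    r'job:(?P<job>\d+)'                     # zero-padded submit number
    r'(?: flows:(?P<flows>[\d,]+))?\]'      # flow numbers, omitted if empty
)

m = NEW_PREFIX.match('[foo.1 running job:01 flows:1] (received)succeeded')
assert m and m.group('status') == 'running' and m.group('flows') == '1'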
TEST_NAME="${TEST_NAME_BASE}-cat-log" diff --git a/tests/functional/cylc-message/02-multi.t b/tests/functional/cylc-message/02-multi.t index 2ede9e27cef..80684779d95 100755 --- a/tests/functional/cylc-message/02-multi.t +++ b/tests/functional/cylc-message/02-multi.t @@ -49,26 +49,27 @@ run_ok "${TEST_NAME_BASE}-validate" cylc validate "${WORKFLOW_NAME}" workflow_run_ok "${TEST_NAME_BASE}-run" cylc play --debug --no-detach "${WORKFLOW_NAME}" LOG="${WORKFLOW_RUN_DIR}/log/workflow/log" -sed -n -e 's/^.* \([A-Z]* - \[foo.1\] status=running: (received).*$\)/\1/p' \ - -e '/badness\|slowness\|and other incorrectness/p' \ +sed -r -n -e 's/^.* ([A-Z]+ .* \(received\).*$)/\1/p' \ + -e '/badness|slowness|and other incorrectness/p' \ "${LOG}" >'sed.out' sed -i 's/\(^.*\) at .*$/\1/;' 'sed.out' # Note: the continuation bit gets printed twice, because the message gets a # warning as being unhandled. cmp_ok 'sed.out' <<__LOG__ -WARNING - [foo.1] status=running: (received)Warn this -INFO - [foo.1] status=running: (received)Greeting -WARNING - [foo.1] status=running: (received)Warn that -DEBUG - [foo.1] status=running: (received)Remove stuffs such as +INFO - [foo.1 submitted job:01 flows:1] (received)started +WARNING - [foo.1 running job:01 flows:1] (received)Warn this +INFO - [foo.1 running job:01 flows:1] (received)Greeting +WARNING - [foo.1 running job:01 flows:1] (received)Warn that +DEBUG - [foo.1 running job:01 flows:1] (received)Remove stuffs such as ${LOG_INDENT}badness ${LOG_INDENT}slowness ${LOG_INDENT}and other incorrectness. ${LOG_INDENT}badness ${LOG_INDENT}slowness ${LOG_INDENT}and other incorrectness. -INFO - [foo.1] status=running: (received)whatever -INFO - [foo.1] status=running: (received)succeeded +INFO - [foo.1 running job:01 flows:1] (received)whatever +INFO - [foo.1 running job:01 flows:1] (received)succeeded __LOG__ purge diff --git a/tests/functional/cylc-poll/05-poll-multi-messages/flow.cylc b/tests/functional/cylc-poll/05-poll-multi-messages/flow.cylc index 0fab38209c7..b24ac3219ec 100644 --- a/tests/functional/cylc-poll/05-poll-multi-messages/flow.cylc +++ b/tests/functional/cylc-poll/05-poll-multi-messages/flow.cylc @@ -18,8 +18,8 @@ cylc__job__wait_cylc_message_started echo "CYLC_MESSAGE=$(date +%FT%H:%M:%SZ)|INFO|hello1" echo "CYLC_MESSAGE=$(date +%FT%H:%M:%SZ)|INFO|hello2" } >>"${CYLC_TASK_LOG_ROOT}.status" -cylc__job__poll_grep_workflow_log -F '[speaker1.1] status=running: (polled)hello1' -cylc__job__poll_grep_workflow_log -F '[speaker1.1] status=running: (polled)hello2' +cylc__job__poll_grep_workflow_log -E 'speaker1\.1 running .* \(polled\)hello1' +cylc__job__poll_grep_workflow_log -E 'speaker1\.1 running .* \(polled\)hello2' """ [[[outputs]]] hello1 = "hello1" @@ -32,7 +32,7 @@ cylc__job__wait_cylc_message_started # get sent back to the workflow echo "CYLC_MESSAGE=$(date +%FT%H:%M:%SZ)|INFO|greet" \ >>"${CYLC_TASK_LOG_ROOT}.status" -cylc__job__poll_grep_workflow_log -F '[speaker2.1] status=running: (polled)greet' +cylc__job__poll_grep_workflow_log -E 'speaker2\.1 running .* \(polled\)greet' """ [[[outputs]]] greet = "greet" diff --git a/tests/functional/cylc-poll/13-comm-method.t b/tests/functional/cylc-poll/13-comm-method.t index 13d71a0d09d..ce97785bc94 100755 --- a/tests/functional/cylc-poll/13-comm-method.t +++ b/tests/functional/cylc-poll/13-comm-method.t @@ -35,11 +35,11 @@ workflow_run_ok "${TEST_NAME_BASE}-run" \ #------------------------------------------------------------------------------- LOG_FILE="${WORKFLOW_RUN_DIR}/log/workflow/log" -PRE_MSG='-health check 
settings:' +PRE_MSG='health:' POST_MSG='.*, polling intervals=10\*PT6S...' for INDEX in 1 2; do for STAGE in 'submission' 'execution'; do - grep_ok "\[t${INDEX}\.1\] ${PRE_MSG} ${STAGE}${POST_MSG}" "${LOG_FILE}" + grep_ok "t${INDEX}\.1 .* ${PRE_MSG} ${STAGE}${POST_MSG}" "${LOG_FILE}" -E done done #------------------------------------------------------------------------------- diff --git a/tests/functional/cylc-poll/14-intervals.t b/tests/functional/cylc-poll/14-intervals.t index 3b5fcd1b67a..b39ddf787ab 100755 --- a/tests/functional/cylc-poll/14-intervals.t +++ b/tests/functional/cylc-poll/14-intervals.t @@ -33,7 +33,7 @@ workflow_run_ok "${TEST_NAME_BASE}-run" \ #------------------------------------------------------------------------------- LOG_FILE="${WORKFLOW_RUN_DIR}/log/workflow/log" -PRE_MSG='-health check settings:' +PRE_MSG='health:' for INDEX in 1 2; do for STAGE in 'submission' 'execution'; do POLL_INT='PT2S,6\*PT10S,' @@ -41,7 +41,7 @@ for INDEX in 1 2; do POLL_INT='2\*PT1S,10\*PT6S,' fi POST_MSG=".*, polling intervals=${POLL_INT}..." - grep_ok "\[t${INDEX}\.1\] ${PRE_MSG} ${STAGE}${POST_MSG}" "${LOG_FILE}" + grep_ok "t${INDEX}\.1 .* ${PRE_MSG} ${STAGE}${POST_MSG}" "${LOG_FILE}" -E done done #------------------------------------------------------------------------------- diff --git a/tests/functional/cylc-poll/15-job-st-file-no-batch.t b/tests/functional/cylc-poll/15-job-st-file-no-batch.t index e373ccca942..8b5ead23684 100755 --- a/tests/functional/cylc-poll/15-job-st-file-no-batch.t +++ b/tests/functional/cylc-poll/15-job-st-file-no-batch.t @@ -26,11 +26,9 @@ workflow_run_fail "${TEST_NAME_BASE}-run" \ cylc play --reference-test --debug --no-detach "${WORKFLOW_NAME}" LOG="${WORKFLOW_RUN_DIR}/log/workflow/log" run_ok "${TEST_NAME_BASE}-log-1" \ - grep -F '[jobs-poll err] 1/t1/01/job.status: incomplete job runner info' \ - "${LOG}" + grep -F '[jobs-poll err] 1/t1/01/job.status: incomplete job runner info' "${LOG}" run_ok "${TEST_NAME_BASE}-log-2" \ - grep -F '[t1.1] status=running: (polled)failed' \ - "${LOG}" + grep -E 't1\.1 running .*\(polled\)failed' "${LOG}" purge exit diff --git a/tests/functional/cylc-remove/00-simple/flow.cylc b/tests/functional/cylc-remove/00-simple/flow.cylc index 91720efbc74..49ed6e48d81 100644 --- a/tests/functional/cylc-remove/00-simple/flow.cylc +++ b/tests/functional/cylc-remove/00-simple/flow.cylc @@ -15,7 +15,7 @@ script = false [[cleaner]] script = """ -cylc__job__poll_grep_workflow_log '\[b\.1\].* (received)failed' +cylc__job__poll_grep_workflow_log -E 'b\.1 running .* \(received\)failed' # Remove the unhandled failed task. cylc remove "$CYLC_WORKFLOW_ID" 'b.1' # Remove waiting c.1 diff --git a/tests/functional/cylc-remove/02-cycling/flow.cylc b/tests/functional/cylc-remove/02-cycling/flow.cylc index 0faa680126f..8330815fdd4 100644 --- a/tests/functional/cylc-remove/02-cycling/flow.cylc +++ b/tests/functional/cylc-remove/02-cycling/flow.cylc @@ -15,8 +15,8 @@ [runtime] [[remover]] script = """ -cylc__job__poll_grep_workflow_log '\[bar\.2020\].* (received)failed' -cylc__job__poll_grep_workflow_log '\[baz\.2021\].* (received)failed' +cylc__job__poll_grep_workflow_log -E 'bar\.2020 running .* \(received\)failed' +cylc__job__poll_grep_workflow_log -E 'baz\.2021 running .* \(received\)failed' # Remove the two unhandled failed tasks. cylc remove $CYLC_WORKFLOW_ID */ba*:failed # Remove the two unsatisfied waiting tasks. 
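# NOTE (illustrative sketch, not the real cylc parser): the
# "polling intervals=2*PT1S,10*PT6S,..." text matched above uses Cylc's
# repeat-multiplier notation for interval lists, where "N*PT6S" means the
# ISO 8601 duration PT6S repeated N times. A minimal expansion sketch:
def expand_intervals(spec: str) -> list:
    """Expand e.g. '2*PT1S,10*PT6S' -> ['PT1S', 'PT1S', 'PT6S', ...]."""
    expanded = []
    for item in spec.split(','):
        if '*' in item:
            count, interval = item.split('*', 1)
            expanded.extend([interval] * int(count))
        else:
            expanded.append(item)
    return expanded

assert expand_intervals('2*PT1S,10*PT6S') == ['PT1S'] * 2 + ['PT6S'] * 10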
diff --git a/tests/functional/cylc-trigger/02-filter-failed/flow.cylc b/tests/functional/cylc-trigger/02-filter-failed/flow.cylc
index 757be5c89f9..3778272e219 100644
--- a/tests/functional/cylc-trigger/02-filter-failed/flow.cylc
+++ b/tests/functional/cylc-trigger/02-filter-failed/flow.cylc
@@ -16,9 +16,9 @@ FIXABLES:succeed-all => Z
     [[fixer]]
         script = """
 cylc__job__wait_cylc_message_started
-cylc__job__poll_grep_workflow_log '\[fixable1\.1\].* (received)failed'
-cylc__job__poll_grep_workflow_log '\[fixable2\.1\].* (received)failed'
-cylc__job__poll_grep_workflow_log '\[fixable3\.1\].* (received)failed'
+cylc__job__poll_grep_workflow_log -E 'fixable1\.1 running .* \(received\)failed'
+cylc__job__poll_grep_workflow_log -E 'fixable2\.1 running .* \(received\)failed'
+cylc__job__poll_grep_workflow_log -E 'fixable3\.1 running .* \(received\)failed'
 cylc trigger "${CYLC_WORKFLOW_ID}" '1/fixable*'
 """
     [[Z]]
diff --git a/tests/functional/cylc-trigger/04-filter-names/flow.cylc b/tests/functional/cylc-trigger/04-filter-names/flow.cylc
index 932c2fc72e3..37c366bc3e7 100644
--- a/tests/functional/cylc-trigger/04-filter-names/flow.cylc
+++ b/tests/functional/cylc-trigger/04-filter-names/flow.cylc
@@ -22,11 +22,11 @@ FIXABLES:succeed-all & loser:fail => Z
     [[fixer]]
         script = """
 cylc__job__wait_cylc_message_started
-cylc__job__poll_grep_workflow_log '\[fixable-1a\.1\].* (received)failed'
-cylc__job__poll_grep_workflow_log '\[fixable-1b\.1\].* (received)failed'
-cylc__job__poll_grep_workflow_log '\[fixable-2a\.1\].* (received)failed'
-cylc__job__poll_grep_workflow_log '\[fixable-2b\.1\].* (received)failed'
-cylc__job__poll_grep_workflow_log '\[fixable-3\.1\].* (received)failed'
+cylc__job__poll_grep_workflow_log -E 'fixable-1a\.1 .* \(received\)failed'
+cylc__job__poll_grep_workflow_log -E 'fixable-1b\.1 .* \(received\)failed'
+cylc__job__poll_grep_workflow_log -E 'fixable-2a\.1 .* \(received\)failed'
+cylc__job__poll_grep_workflow_log -E 'fixable-2b\.1 .* \(received\)failed'
+cylc__job__poll_grep_workflow_log -E 'fixable-3\.1 .* \(received\)failed'
 cylc trigger "${CYLC_WORKFLOW_ID}" '1/FIXABLE-1' '1/fixable-2*' '1/fixable-3'
 """
     [[loser]]
diff --git a/tests/functional/cylc-trigger/05-filter-cycles/flow.cylc b/tests/functional/cylc-trigger/05-filter-cycles/flow.cylc
index b672cb9daa1..2953007c234 100644
--- a/tests/functional/cylc-trigger/05-filter-cycles/flow.cylc
+++ b/tests/functional/cylc-trigger/05-filter-cycles/flow.cylc
@@ -19,8 +19,8 @@
     [[fixer]]
         script = """
 cylc__job__wait_cylc_message_started
-cylc__job__poll_grep_workflow_log '\[fixable\.19700101T0000Z\].* (received)failed'
-cylc__job__poll_grep_workflow_log '\[fixable\.19900101T0000Z\].* (received)failed'
+cylc__job__poll_grep_workflow_log -E 'fixable\.19700101T0000Z .* \(received\)failed'
+cylc__job__poll_grep_workflow_log -E 'fixable\.19900101T0000Z .* \(received\)failed'
 cylc trigger "${CYLC_WORKFLOW_ID}" '19700101T0000Z/*' '19900101T0000Z/*'
 """
     [[z]]
diff --git a/tests/functional/events/23-workflow-stalled-handler/flow.cylc b/tests/functional/events/23-workflow-stalled-handler/flow.cylc
index 96e183124d0..4cd521ed24e 100644
--- a/tests/functional/events/23-workflow-stalled-handler/flow.cylc
+++ b/tests/functional/events/23-workflow-stalled-handler/flow.cylc
@@ -1,6 +1,6 @@
 [scheduler]
     [[events]]
-        stall handlers = cylc set-outputs %(workflow)s bar.1
+        stall handlers = "cylc set-outputs --flow=1 %(workflow)s bar.1"
         stall timeout = PT0S
         abort on stall timeout = False
         expected task failures = bar.1
diff --git
a/tests/functional/events/38-task-event-handler-custom.t b/tests/functional/events/38-task-event-handler-custom.t index 7a89b3a24c1..e972165f585 100755 --- a/tests/functional/events/38-task-event-handler-custom.t +++ b/tests/functional/events/38-task-event-handler-custom.t @@ -28,7 +28,7 @@ WORKFLOW_LOG="${WORKFLOW_RUN_DIR}/log/workflow/log" grep_ok \ "\[(('event-handler-00', 'custom-1'), 1) out\] !!CUSTOM!! foo.1 fugu Data ready for barring" \ "${FOO_ACTIVITY_LOG}" -grep_ok "\[foo.1\].*Data ready for barring" "${WORKFLOW_LOG}" -grep_ok "\[foo.1\].*Data ready for bazzing" "${WORKFLOW_LOG}" -grep_ok "\[foo.1\].*Aren't the hydrangeas nice?" "${WORKFLOW_LOG}" +grep_ok "foo\.1 .*Data ready for barring" "${WORKFLOW_LOG}" -E +grep_ok "foo\.1 .*Data ready for bazzing" "${WORKFLOW_LOG}" -E +grep_ok "foo\.1 .*Aren't the hydrangeas nice" "${WORKFLOW_LOG}" -E purge diff --git a/tests/functional/ext-trigger/01-no-nudge/flow.cylc b/tests/functional/ext-trigger/01-no-nudge/flow.cylc index 324fee21621..f19bc542c05 100644 --- a/tests/functional/ext-trigger/01-no-nudge/flow.cylc +++ b/tests/functional/ext-trigger/01-no-nudge/flow.cylc @@ -31,7 +31,7 @@ [[foo]] script = """ cylc kill "$CYLC_WORKFLOW_NAME" 'bar.1' - cylc__job__poll_grep_workflow_log '\[bar\.1\].* (internal)failed' + cylc__job__poll_grep_workflow_log -E 'bar\.1 .* \(internal\)failed' cylc release "$CYLC_WORKFLOW_NAME" 'bar.1' """ [[bar]] diff --git a/tests/functional/hold-release/02-hold-on-spawn.t b/tests/functional/hold-release/02-hold-on-spawn.t index 7213bc37b75..24c2a2a3b2c 100755 --- a/tests/functional/hold-release/02-hold-on-spawn.t +++ b/tests/functional/hold-release/02-hold-on-spawn.t @@ -33,7 +33,7 @@ workflow_run_ok "${TEST_NAME_BASE}-run" cylc play --hold-after=0 "${WORKFLOW_NAM cylc release "${WORKFLOW_NAME}" foo.1 # foo.1 should run and spawn bar.1 as waiting and held -poll_grep_workflow_log 'spawned bar\.1' +poll_grep_workflow_log -E 'bar\.1 .* spawned' sqlite3 "${WORKFLOW_RUN_DIR}/log/db" \ 'SELECT cycle, name, status, is_held FROM task_pool' > task-pool.out diff --git a/tests/functional/hold-release/05-release.t b/tests/functional/hold-release/05-release.t index 1da294f5271..15cae14442b 100755 --- a/tests/functional/hold-release/05-release.t +++ b/tests/functional/hold-release/05-release.t @@ -63,7 +63,8 @@ init_workflow "${TEST_NAME_BASE}" <<'__FLOW_CONFIG__' [[stop]] inherit = STOP script = """ - cylc__job__poll_grep_workflow_log '\[dog1\.1\] -task proxy removed (finished)' + cylc__job__poll_grep_workflow_log -E \ + 'dog1\.1 succeeded .* task proxy removed \(finished\)' cylc stop "${CYLC_WORKFLOW_NAME}" """ __FLOW_CONFIG__ diff --git a/tests/functional/hold-release/08-hold.t b/tests/functional/hold-release/08-hold.t index 9cc46df88f0..01196cd8c85 100755 --- a/tests/functional/hold-release/08-hold.t +++ b/tests/functional/hold-release/08-hold.t @@ -33,14 +33,14 @@ init_workflow "${TEST_NAME_BASE}" <<'__FLOW_CONFIG__' [[holdrelease]] script = """ cylc__job__wait_cylc_message_started -cylc__job__poll_grep_workflow_log -F 'spawned foo.1' -cylc__job__poll_grep_workflow_log -F 'spawned bar.1' -cylc__job__poll_grep_workflow_log -F 'spawned cheese.1' -cylc__job__poll_grep_workflow_log -F 'spawned jam.1' -cylc__job__poll_grep_workflow_log -F 'spawned cat1.1' -cylc__job__poll_grep_workflow_log -F 'spawned cat2.1' -cylc__job__poll_grep_workflow_log -F 'spawned dog1.1' -cylc__job__poll_grep_workflow_log -F 'spawned dog2.1' +cylc__job__poll_grep_workflow_log -E 'foo\.1 .* spawned' +cylc__job__poll_grep_workflow_log -E 'bar\.1 .* 
spawned' +cylc__job__poll_grep_workflow_log -E 'cheese\.1 .* spawned' +cylc__job__poll_grep_workflow_log -E 'jam\.1 .* spawned' +cylc__job__poll_grep_workflow_log -E 'cat1\.1 .* spawned' +cylc__job__poll_grep_workflow_log -E 'cat2\.1 .* spawned' +cylc__job__poll_grep_workflow_log -E 'dog1\.1 .* spawned' +cylc__job__poll_grep_workflow_log -E 'dog2\.1 .* spawned' cylc hold ${CYLC_WORKFLOW_NAME} '*FF.1' # inexact fam cylc hold ${CYLC_WORKFLOW_NAME} 'TOAST.1' # exact fam cylc hold ${CYLC_WORKFLOW_NAME} 'cat*.1' # inexact tasks diff --git a/tests/functional/hold-release/11-retrying/flow.cylc b/tests/functional/hold-release/11-retrying/flow.cylc index 402dcebe163..9e60eb3b106 100644 --- a/tests/functional/hold-release/11-retrying/flow.cylc +++ b/tests/functional/hold-release/11-retrying/flow.cylc @@ -17,14 +17,27 @@ t-retry-able => t-analyse execution retry delays = PT15S, 2*PT1S [[t-hold-release]] script = """ - cylc__job__poll_grep_workflow_log -F \ - '[t-retry-able.1] -job(01) failed, retrying in PT15S' + cylc__job__poll_grep_workflow_log -E \ + 't-retry-able\.1 running job:01.* \(received\)failed' + + cylc__job__poll_grep_workflow_log -E \ + 't-retry-able\.1 running job:01.* => waiting' + + cylc__job__poll_grep_workflow_log -E \ + 't-retry-able\.1 waiting job:01.* retrying in PT15S' + cylc hold "${CYLC_WORKFLOW_NAME}" 't-retry-able.1' - cylc__job__poll_grep_workflow_log -F \ - '[t-retry-able.1] -running => waiting' + + cylc__job__poll_grep_workflow_log -E \ + 't-retry-able\.1 waiting job:01.* => waiting\(held\)' + cylc release "${CYLC_WORKFLOW_NAME}" 't-retry-able.1' - cylc__job__poll_grep_workflow_log -F \ - '[t-retry-able.1] -waiting => waiting (queued)' + + cylc__job__poll_grep_workflow_log -E \ + 't-retry-able\.1 waiting\(held\) job:01.* => waiting' + + cylc__job__poll_grep_workflow_log -E \ + 't-retry-able\.1 waiting job:01.* => waiting\(queued\)' """ [[t-analyse]] script = """ diff --git a/tests/functional/hold-release/18-hold-cycle-globs/flow.cylc b/tests/functional/hold-release/18-hold-cycle-globs/flow.cylc index f1a8ec31b82..923fb81e89a 100644 --- a/tests/functional/hold-release/18-hold-cycle-globs/flow.cylc +++ b/tests/functional/hold-release/18-hold-cycle-globs/flow.cylc @@ -23,9 +23,9 @@ [runtime] [[holder]] script = """ - cylc__job__poll_grep_workflow_log 'spawned t1.19900101T0000Z' - cylc__job__poll_grep_workflow_log 'spawned t2.20100101T0000Z' - cylc__job__poll_grep_workflow_log 'spawned t3.20300101T0000Z' + cylc__job__poll_grep_workflow_log -E 't1\.19900101T0000Z .* spawned' + cylc__job__poll_grep_workflow_log -E 't2\.20100101T0000Z .* spawned' + cylc__job__poll_grep_workflow_log -E 't3\.20300101T0000Z .* spawned' cylc hold "${CYLC_WORKFLOW_NAME}" '*/t*' """ [[releaser]] diff --git a/tests/functional/hold-release/19-no-reset-prereq-on-waiting/flow.cylc b/tests/functional/hold-release/19-no-reset-prereq-on-waiting/flow.cylc index c5c98d5faa6..7d7a17d1ee4 100644 --- a/tests/functional/hold-release/19-no-reset-prereq-on-waiting/flow.cylc +++ b/tests/functional/hold-release/19-no-reset-prereq-on-waiting/flow.cylc @@ -16,7 +16,7 @@ holder => releaser script = true [[holder]] script = """ -cylc__job__poll_grep_workflow_log -F 'spawned t1.1' +cylc__job__poll_grep_workflow_log -E 't1\.1 .* spawned' cylc hold "${CYLC_WORKFLOW_NAME}" 't1.1' """ [[releaser]] diff --git a/tests/functional/intelligent-host-selection/02-badhosts.t b/tests/functional/intelligent-host-selection/02-badhosts.t index 0e2b274bcf0..98c1a8c4554 100644 --- a/tests/functional/intelligent-host-selection/02-badhosts.t 
+++ b/tests/functional/intelligent-host-selection/02-badhosts.t @@ -60,11 +60,11 @@ LOGFILE="${WORKFLOW_RUN_DIR}/log/workflow/log" # Check that badhosttask has submit failed, but not good or mixed named_grep_ok "badhost task submit failed" \ - "\[badhosttask.1\] -submission failed" "${LOGFILE}" + "badhosttask\.1 .* submit-failed" "${LOGFILE}" named_grep_ok "goodhost suceeded" \ - "\[mixedhosttask.1\] -running => succeeded" "${LOGFILE}" + "mixedhosttask\.1 .* succeeded" "${LOGFILE}" named_grep_ok "mixedhost task suceeded" \ - "\[goodhosttask.1\] -running => succeeded" "${LOGFILE}" + "goodhosttask\.1 .* succeeded" "${LOGFILE}" # Check that when a task fail badhosts associated with that task's platform # are removed from the badhosts set. diff --git a/tests/functional/job-submission/01-job-nn-localhost/db.sqlite3 b/tests/functional/job-submission/01-job-nn-localhost/db.sqlite3 index 98c0c716767..11c28b9baf4 100644 --- a/tests/functional/job-submission/01-job-nn-localhost/db.sqlite3 +++ b/tests/functional/job-submission/01-job-nn-localhost/db.sqlite3 @@ -7,6 +7,7 @@ INSERT INTO inheritance VALUES('root','["root"]'); INSERT INTO inheritance VALUES('foo','["foo", "root"]'); CREATE TABLE workflow_params(key TEXT, value TEXT, PRIMARY KEY(key)); INSERT INTO workflow_params VALUES('cylc_version', '8.0b2.dev'); +INSERT INTO workflow_params VALUES('flow_counter', '1'); CREATE TABLE workflow_template_vars(key TEXT, value TEXT, PRIMARY KEY(key)); CREATE TABLE task_action_timers(cycle TEXT, name TEXT, ctx_key TEXT, ctx TEXT, delays TEXT, num INTEGER, delay TEXT, timeout TEXT, PRIMARY KEY(cycle, name, ctx_key)); INSERT INTO task_action_timers VALUES('1','foo','"poll_timer"','["tuple", [[99, "running"]]]','[]',0,NULL,NULL); @@ -16,10 +17,11 @@ CREATE TABLE task_events(name TEXT, cycle TEXT, time TEXT, submit_num INTEGER, e CREATE TABLE task_jobs(cycle TEXT, name TEXT, submit_num INTEGER, is_manual_submit INTEGER, try_num INTEGER, time_submit TEXT, time_submit_exit TEXT, submit_status INTEGER, time_run TEXT, time_run_exit TEXT, run_signal TEXT, run_status INTEGER, platform_name TEXT, job_runner_name TEXT, job_id TEXT, PRIMARY KEY(cycle, name, submit_num)); CREATE TABLE task_late_flags(cycle TEXT, name TEXT, value INTEGER, PRIMARY KEY(cycle, name)); CREATE TABLE task_outputs(cycle TEXT, name TEXT, outputs TEXT, PRIMARY KEY(cycle, name)); -CREATE TABLE task_pool(cycle TEXT, name TEXT, flow_label TEXT, status TEXT, is_held INTEGER, PRIMARY KEY(cycle, name, flow_label)); -INSERT INTO task_pool VALUES('1','foo','abcdefg', 'waiting', 0); -CREATE TABLE task_states(name TEXT, cycle TEXT, flow_label TEXT, time_created TEXT, time_updated TEXT, submit_num INTEGER, status TEXT, PRIMARY KEY(name, cycle, flow_label)); -INSERT INTO task_states VALUES('foo','1','abcdefg', '2019-06-14T11:30:16+01:00','2019-06-14T11:40:24+01:00',99,'waiting'); +CREATE TABLE task_pool(cycle TEXT, name TEXT, flow_nums TEXT, status TEXT, is_held INTEGER, PRIMARY KEY(cycle, name, flow_nums)); +INSERT INTO task_pool VALUES('1','foo','["1", "2"]','waiting', 0); +CREATE TABLE task_states(name TEXT, cycle TEXT, flow_nums TEXT, time_created TEXT, +time_updated TEXT, submit_num INTEGER, status TEXT, PRIMARY KEY(name, cycle, flow_nums)); +INSERT INTO task_states VALUES('foo','1','["1", "2"]', '2019-06-14T11:30:16+01:00','2019-06-14T11:40:24+01:00',99,'waiting'); CREATE TABLE task_prerequisites(cycle TEXT, name TEXT, prereq_name TEXT, prereq_cycle TEXT, prereq_output TEXT, satisfied TEXT, PRIMARY KEY(cycle, name, prereq_name, prereq_cycle, 
prereq_output)); CREATE TABLE task_timeout_timers(cycle TEXT, name TEXT, timeout REAL, PRIMARY KEY(cycle, name)); CREATE TABLE xtriggers(signature TEXT, results TEXT, PRIMARY KEY(signature)); diff --git a/tests/functional/job-submission/19-platform_select.t b/tests/functional/job-submission/19-platform_select.t index 67be35f60df..6f8a4787028 100755 --- a/tests/functional/job-submission/19-platform_select.t +++ b/tests/functional/job-submission/19-platform_select.t @@ -36,17 +36,17 @@ logfile="${WORKFLOW_RUN_DIR}/log/workflow/log" # Check that host = $(hostname) is correctly evaluated grep_ok \ - "platform_subshell.1.*evaluated as improbable platform name" \ + "platform_subshell\.1.*evaluated as improbable platform name" \ "${logfile}" # Check that host = `hostname` is correctly evaluated grep_ok \ - "host_subshell_backticks.1:.*\`hostname\` evaluated as localhost" \ + "host_subshell_backticks\.1.*\`hostname\` evaluated as localhost" \ "${logfile}" # Check that platform = $(echo "improbable platform name") correctly evaluated grep_ok \ - "platform_subshell.1:.*evaluated as improbable platform name" \ + "platform_subshell\.1.*evaluated as improbable platform name" \ "${logfile}" purge diff --git a/tests/functional/logging/02-duplicates/flow.cylc b/tests/functional/logging/02-duplicates/flow.cylc index e0d7e591a14..4165b9ac693 100644 --- a/tests/functional/logging/02-duplicates/flow.cylc +++ b/tests/functional/logging/02-duplicates/flow.cylc @@ -22,7 +22,7 @@ script = false [[bar]] script = """ -cylc set-outputs "${CYLC_WORKFLOW_NAME}" "foo.${CYLC_TASK_CYCLE_POINT}" +cylc set-outputs --flow=1 "${CYLC_WORKFLOW_NAME}" "foo.${CYLC_TASK_CYCLE_POINT}" """ [[restart]] script = """ diff --git a/tests/functional/pause-resume/12-pause-then-retry/flow.cylc b/tests/functional/pause-resume/12-pause-then-retry/flow.cylc index 8d982c896d3..18849f0f170 100644 --- a/tests/functional/pause-resume/12-pause-then-retry/flow.cylc +++ b/tests/functional/pause-resume/12-pause-then-retry/flow.cylc @@ -19,21 +19,29 @@ [[t-pause]] script = """ cylc pause "${CYLC_WORKFLOW_NAME}" + cylc__job__poll_grep_workflow_log -F 'Command succeeded: pause' + # Poll t-submit-retry-able, should return submit-fail cylc poll "${CYLC_WORKFLOW_NAME}" 't-submit-retry-able' + # Allow t-retry-able to continue rm -f "${CYLC_WORKFLOW_RUN_DIR}/file" - cylc__job__poll_grep_workflow_log -F \ - '[t-retry-able.1] -running => waiting' - cylc__job__poll_grep_workflow_log -F \ - '[t-submit-retry-able.1] -submitted => waiting' + + cylc__job__poll_grep_workflow_log -E \ + 't-retry-able\.1 running .* => waiting' + + cylc__job__poll_grep_workflow_log -E \ + 't-submit-retry-able\.1 submitted .* => waiting' + # Resume the workflow cylc play "${CYLC_WORKFLOW_NAME}" - cylc__job__poll_grep_workflow_log -F \ - '[t-retry-able.1] -waiting => waiting (queued)' - cylc__job__poll_grep_workflow_log -F \ - '[t-submit-retry-able.1] -waiting => waiting (queued)' + + cylc__job__poll_grep_workflow_log -E \ + 't-retry-able\.1 waiting .* => waiting\(queued\)' + + cylc__job__poll_grep_workflow_log -E \ + 't-submit-retry-able\.1 waiting .* => waiting\(queued\)' """ [[t-retry-able]] script = """ diff --git a/tests/functional/pre-initial/warm-insert/flow.cylc b/tests/functional/pre-initial/warm-insert/flow.cylc index f5854261089..de61a99b865 100644 --- a/tests/functional/pre-initial/warm-insert/flow.cylc +++ b/tests/functional/pre-initial/warm-insert/flow.cylc @@ -25,4 +25,6 @@ [[INSERT_FAM]] [[foo,bar]] [[inserter]] - script = cylc trigger --reflow $CYLC_WORKFLOW_NAME 
foo.20100101T1200Z + script = """ + cylc trigger --reflow --meta="other" $CYLC_WORKFLOW_NAME foo.20100101T1200Z + """ diff --git a/tests/functional/reload/11-retrying/flow.cylc b/tests/functional/reload/11-retrying/flow.cylc index 524c0e44041..0bf1477afa8 100644 --- a/tests/functional/reload/11-retrying/flow.cylc +++ b/tests/functional/reload/11-retrying/flow.cylc @@ -20,7 +20,7 @@ fi execution retry delays = PT0S [[reloader]] script = """ -cylc__job__poll_grep_workflow_log -F '[retrier.1] -running (held) => waiting (held)' +cylc__job__poll_grep_workflow_log -E 'retrier\.1 running\(held\) .* => waiting\(held\)' cylc reload "${CYLC_WORKFLOW_NAME}" cylc reload "${CYLC_WORKFLOW_NAME}" cylc__job__poll_grep_workflow_log -F 'Reload completed' diff --git a/tests/functional/reload/14-waiting/flow.cylc b/tests/functional/reload/14-waiting/flow.cylc index 663e823bb43..e96603e7bcd 100644 --- a/tests/functional/reload/14-waiting/flow.cylc +++ b/tests/functional/reload/14-waiting/flow.cylc @@ -24,7 +24,7 @@ done [[reloader]] script = """ cylc reload "${CYLC_WORKFLOW_NAME}" -cylc__job__poll_grep_workflow_log -F '[waiter.1] -reloaded task definition' +cylc__job__poll_grep_workflow_log -E 'waiter\.1 .* reloaded task definition' rm -f "${CYLC_WORKFLOW_WORK_DIR}/1/sleeping-waiter/file" rm -f "${CYLC_WORKFLOW_WORK_DIR}/1/starter/file" """ diff --git a/tests/functional/reload/17-graphing-change.t b/tests/functional/reload/17-graphing-change.t index 83dbebd6000..d2824653d4c 100755 --- a/tests/functional/reload/17-graphing-change.t +++ b/tests/functional/reload/17-graphing-change.t @@ -65,8 +65,9 @@ grep_ok "Removed task: 'one'" "${LOG_FILE}" cp "${TEST_SOURCE_DIR}/graphing-change/flow-2.cylc" \ "${RUN_DIR}/${WORKFLOW_NAME}/flow.cylc" -cylc set-outputs "${WORKFLOW_NAME}" foo.1 -cylc set-outputs "${WORKFLOW_NAME}" baz.1 +# Spawn a couple of task proxies, to get "task definition removed" message. 
+cylc set-outputs --flow=1 "${WORKFLOW_NAME}" foo.1 +cylc set-outputs --flow=1 "${WORKFLOW_NAME}" baz.1 # reload workflow run_ok "${TEST_NAME_BASE}-swap-reload" cylc reload "${WORKFLOW_NAME}" poll grep_workflow_log_n_times 'Reload completed' 3 diff --git a/tests/functional/reload/19-remote-kill/flow.cylc b/tests/functional/reload/19-remote-kill/flow.cylc index ed599170dfc..5aba73dc488 100644 --- a/tests/functional/reload/19-remote-kill/flow.cylc +++ b/tests/functional/reload/19-remote-kill/flow.cylc @@ -17,7 +17,7 @@ cylc reload "${CYLC_WORKFLOW_NAME}" cylc__job__poll_grep_workflow_log -F 'Reload completed' cylc kill "${CYLC_WORKFLOW_NAME}" 'foo.1' - cylc__job__poll_grep_workflow_log -F '[foo.1] -job(01) killed' + cylc__job__poll_grep_workflow_log -E 'foo\.1 failed\(held\) job:01.* job killed' """ [[[job]]] execution time limit = PT1M diff --git a/tests/functional/reload/runahead/flow.cylc b/tests/functional/reload/runahead/flow.cylc index 60d1d3c38c8..ef649f3a4b9 100644 --- a/tests/functional/reload/runahead/flow.cylc +++ b/tests/functional/reload/runahead/flow.cylc @@ -18,7 +18,7 @@ script = true [[reloader]] script = """ -cylc__job__poll_grep_workflow_log '\[foo.* (received)failed' +cylc__job__poll_grep_workflow_log -E "foo\.${CYLC_TASK_CYCLE_POINT} running .*\(received\)failed" perl -pi -e 's/(runahead limit = )P2( # marker)/\1 P4\2/' $CYLC_WORKFLOW_RUN_DIR/flow.cylc cylc reload $CYLC_WORKFLOW_NAME """ diff --git a/tests/functional/remote/05-remote-init.t b/tests/functional/remote/05-remote-init.t index bef6da5b91c..2a9b3426866 100644 --- a/tests/functional/remote/05-remote-init.t +++ b/tests/functional/remote/05-remote-init.t @@ -55,12 +55,9 @@ f|0|0|ariel g|0|0|localhost __SELECT__ -grep_ok "WARNING - Incomplete tasks:" \ - "${TEST_NAME_BASE}-run.stderr" -grep_ok "a.1 did not complete required outputs" \ - "${TEST_NAME_BASE}-run.stderr" -grep_ok "b.1 did not complete required outputs" \ - "${TEST_NAME_BASE}-run.stderr" +grep_ok "WARNING - Incomplete tasks:" "${TEST_NAME_BASE}-run.stderr" +grep_ok "a.1 did not complete required outputs" "${TEST_NAME_BASE}-run.stderr" +grep_ok "b.1 did not complete required outputs" "${TEST_NAME_BASE}-run.stderr" purge exit diff --git a/tests/functional/remote/06-poll.t b/tests/functional/remote/06-poll.t index b49c41ba047..d10b5d2dfaf 100644 --- a/tests/functional/remote/06-poll.t +++ b/tests/functional/remote/06-poll.t @@ -52,8 +52,8 @@ log_scan \ "$(cylc cat-log -m p "$WORKFLOW_NAME")" \ 10 \ 1 \ - '\[foo.1\] status=submitted: (polled)foo' \ - '\[foo.1\] status=succeeded: (polled)succeeded' + '\[foo\.1 submitted .* (polled)foo' \ + '\[foo\.1 succeeded .* (polled)succeeded' purge exit diff --git a/tests/functional/restart/08-stop-after-cycle-point.t b/tests/functional/restart/08-stop-after-cycle-point.t index 871c30ea075..4fb85da8ef0 100644 --- a/tests/functional/restart/08-stop-after-cycle-point.t +++ b/tests/functional/restart/08-stop-after-cycle-point.t @@ -64,7 +64,6 @@ cmp_ok stopcp.out <<< '1971' cmp_ok taskpool.out << '__OUT__' 1971|hello|waiting __OUT__ - # Check that the command line stop point works (even after restart)... 
workflow_run_ok "${TEST_NAME_BASE}-2-restart" \ cylc play --no-detach "${WORKFLOW_NAME}" diff --git a/tests/functional/restart/08-stop-after-cycle-point/reference.log b/tests/functional/restart/08-stop-after-cycle-point/reference.log new file mode 100644 index 00000000000..a2e5868047a --- /dev/null +++ b/tests/functional/restart/08-stop-after-cycle-point/reference.log @@ -0,0 +1,4 @@ +Initial point: 19700101T0000Z +Final point: 19700101T0300Z +[hello.19700101T0000Z] -triggered off ['hello.19691231T2300Z'] +[hello.19700101T0100Z] -triggered off ['hello.19700101T0000Z'] diff --git a/tests/functional/restart/22-hold/flow.cylc b/tests/functional/restart/22-hold/flow.cylc index 176b4814743..30570843aef 100644 --- a/tests/functional/restart/22-hold/flow.cylc +++ b/tests/functional/restart/22-hold/flow.cylc @@ -17,7 +17,7 @@ [[t1]] script = """ if [[ "${CYLC_TASK_CYCLE_POINT}" == '2016' ]]; then - cylc__job__poll_grep_workflow_log -F 'spawned t2.2016' + cylc__job__poll_grep_workflow_log -E 't2\.2016 .* spawned' cylc hold "${CYLC_WORKFLOW_NAME}" t2.2016 t2.2017 cylc stop "${CYLC_WORKFLOW_NAME}" else diff --git a/tests/functional/restart/53-task-prerequisites/flow.cylc b/tests/functional/restart/53-task-prerequisites/flow.cylc index 02c46e7cbe4..3a1caa48143 100644 --- a/tests/functional/restart/53-task-prerequisites/flow.cylc +++ b/tests/functional/restart/53-task-prerequisites/flow.cylc @@ -14,7 +14,13 @@ """ [runtime] [[foo]] - script = if [[ "$CYLC_TASK_JOB" == '1/foo/01' ]]; then false; else true; fi + script = """ + if [[ "$CYLC_TASK_JOB" == '1/foo/01' ]]; then + false + else + true + fi + """ [[apollo]] script = cylc message -- "The Eagle has landed" [[[outputs]]] diff --git a/tests/functional/runahead/06-release-update.t b/tests/functional/runahead/06-release-update.t index c7f6c024509..6f9d0475781 100644 --- a/tests/functional/runahead/06-release-update.t +++ b/tests/functional/runahead/06-release-update.t @@ -27,7 +27,7 @@ CYLC_RUN_PID="$!" poll_workflow_running YYYY="$(date +%Y)" NEXT1=$(( YYYY + 1 )) -poll_grep_workflow_log -F "spawned bar.${NEXT1}" +poll_grep_workflow_log -E "bar\.${NEXT1} .* spawned" # sleep a little to allow the datastore to update (`cylc dump` sees the # datastore) TODO can we avoid this flaky sleep somehow? diff --git a/tests/functional/runahead/default-future/flow.cylc b/tests/functional/runahead/default-future/flow.cylc index d525f3bd186..706993f0ca9 100644 --- a/tests/functional/runahead/default-future/flow.cylc +++ b/tests/functional/runahead/default-future/flow.cylc @@ -27,7 +27,7 @@ [[spawner]] script = """ # spawn wibble - cylc set-outputs $CYLC_WORKFLOW_ID foo.20100101T0800Z + cylc set-outputs --flow=1 $CYLC_WORKFLOW_ID foo.20100101T0800Z """ [[foo]] script = false diff --git a/tests/functional/spawn-on-demand/01-reflow.t b/tests/functional/spawn-on-demand/01-reflow.t index f0aecf1a882..6c8a2e05c5a 100644 --- a/tests/functional/spawn-on-demand/01-reflow.t +++ b/tests/functional/spawn-on-demand/01-reflow.t @@ -16,7 +16,7 @@ # along with this program. If not, see . #------------------------------------------------------------------------------- -# Check that triggering with --reflow does cause reflow. +# Check that triggering with --reflow starts a new flow. .
"$(dirname "$0")/test_header" set_test_number 2 reftest diff --git a/tests/functional/spawn-on-demand/01-reflow/flow.cylc b/tests/functional/spawn-on-demand/01-reflow/flow.cylc index 4bd486b18bd..7cae2411756 100644 --- a/tests/functional/spawn-on-demand/01-reflow/flow.cylc +++ b/tests/functional/spawn-on-demand/01-reflow/flow.cylc @@ -12,5 +12,5 @@ [[triggerer]] script = """ # Cause both bar.1 and baz.1 to run again. -cylc trigger --reflow ${CYLC_WORKFLOW_ID} bar.1 +cylc trigger --reflow --meta=cheese ${CYLC_WORKFLOW_ID} bar.1 """ diff --git a/tests/functional/spawn-on-demand/02-merge.t b/tests/functional/spawn-on-demand/02-merge.t index 1f87659e528..f693c490ba1 100644 --- a/tests/functional/spawn-on-demand/02-merge.t +++ b/tests/functional/spawn-on-demand/02-merge.t @@ -16,83 +16,46 @@ # along with this program. If not, see . #------------------------------------------------------------------------------- -# Check that reflows merge correctly if they catch up, AND that redundant flow -# labels get merged. +# Check that flows merge correctly. . "$(dirname "$0")/test_header" install_workflow "${TEST_NAME_BASE}" -set_test_number 6 +set_test_number 4 -# validate TEST_NAME="${TEST_NAME_BASE}"-validate run_ok "${TEST_NAME}" cylc validate "${WORKFLOW_NAME}" -# Set frequent pruning of merged flow labels. -create_test_global_config "" " -[scheduler] - [[main loop]] - [[[prune flow labels]]] - interval = PT10S" - -# reference test TEST_NAME="${TEST_NAME_BASE}"-run -workflow_run_ok "${TEST_NAME}" cylc play --reference-test --no-detach "${WORKFLOW_NAME}" - -# extract flow labels from job files -# shellcheck disable=SC2046 -eval $(cylc cat-log -s 1 -f j "${WORKFLOW_NAME}" foo.1 | grep CYLC_TASK_FLOW_LABEL) -FLOW_ONE="${CYLC_TASK_FLOW_LABEL}" - -# shellcheck disable=SC2046 -eval $(cylc cat-log -s 2 -f j "${WORKFLOW_NAME}" foo.1 | grep CYLC_TASK_FLOW_LABEL) -FLOW_TWO="${CYLC_TASK_FLOW_LABEL}" - -# shellcheck disable=SC2046 -eval $(cylc cat-log -s 1 -f j "${WORKFLOW_NAME}" bar.3 | grep CYLC_TASK_FLOW_LABEL) -FLOW_MERGED="${CYLC_TASK_FLOW_LABEL}" +workflow_run_ok "${TEST_NAME}" cylc play --reference-test \ + --debug --no-detach "${WORKFLOW_NAME}" -# shellcheck disable=SC2046 -eval $(cylc cat-log -s 1 -f j "${WORKFLOW_NAME}" baz.3 | grep CYLC_TASK_FLOW_LABEL) -FLOW_PRUNED="${CYLC_TASK_FLOW_LABEL}" - -# compare with expected tasks in each flow (original, reflow, merged, pruned) +# check the DB as well sqlite3 ~/cylc-run/"${WORKFLOW_NAME}"/log/db \ - "SELECT name, cycle, flow_label FROM task_states \ + "SELECT name, cycle, flow_nums FROM task_states \ WHERE submit_num is 1 order by cycle" \ > flow-one.db -run_ok check_merged_label eval "test $FLOW_MERGED == $FLOW_ONE$FLOW_TWO || \ - test $FLOW_MERGED == $FLOW_TWO$FLOW_ONE" - -run_ok check_pruned_label eval "test $FLOW_PRUNED == $FLOW_ONE || \ - test $FLOW_PRUNED == $FLOW_TWO" - cmp_ok flow-one.db - << __OUT__ -foo|1|${FLOW_ONE} -bar|1|${FLOW_ONE} -baz|1|${FLOW_ONE} -foo|2|${FLOW_ONE} -bar|2|${FLOW_ONE} -baz|2|${FLOW_ONE} -foo|3|${FLOW_ONE} -foo|3|${FLOW_MERGED} -bar|3|${FLOW_MERGED} -baz|3|${FLOW_PRUNED} +foo|1|[1] +bar|1|[1] +foo|2|[1] +bar|2|[1] +foo|3|[1] +foo|3|[1, 2] +bar|3|[1, 2] __OUT__ sqlite3 ~/cylc-run/"${WORKFLOW_NAME}"/log/db \ - "SELECT name, cycle, flow_label FROM task_states \ + "SELECT name, cycle, flow_nums FROM task_states \ WHERE submit_num is 2 order by cycle" \ > flow-two.db cmp_ok flow-two.db - << __OUT__ -foo|1|${FLOW_TWO} -bar|1|${FLOW_TWO} -baz|1|${FLOW_TWO} -foo|2|${FLOW_TWO} -bar|2|${FLOW_TWO} -baz|2|${FLOW_TWO} +foo|1|[2] 
+bar|1|[2] +foo|2|[2] +bar|2|[2] __OUT__ purge diff --git a/tests/functional/spawn-on-demand/02-merge/flow.cylc b/tests/functional/spawn-on-demand/02-merge/flow.cylc index 676ea27ac37..e69432fd1a7 100644 --- a/tests/functional/spawn-on-demand/02-merge/flow.cylc +++ b/tests/functional/spawn-on-demand/02-merge/flow.cylc @@ -1,25 +1,30 @@ -# foo.3 triggers a reflow at foo.1 and waits for it to catch up and merge. -# This results in a completely merged flow, e.g. u, V -> uV at foo.3 and bar.3. -# Then bar.3 waits for the merged labels to be pruned, e.g. uV -> u (or V). -[scheduler] - allow implicit tasks = True +# foo.3 triggers a new flow at foo.1 and waits for it to catch up and merge. +# bar checks for the expected flow names at each cycle point. [scheduling] cycling mode = integer initial cycle point = 1 final cycle point = 3 [[graph]] - P1 = "foo[-P1] => foo => bar => baz" + P1 = "foo[-P1] => foo => bar" [runtime] [[foo]] - script = """ -if (( CYLC_TASK_CYCLE_POINT == 3 )); then - cylc trigger --reflow ${CYLC_WORKFLOW_ID} foo.1 - cylc__job__poll_grep_workflow_log 'merged flow' -fi - """ + script = """ + if (( CYLC_TASK_CYCLE_POINT == 3 )); then + cylc trigger --reflow --meta=other ${CYLC_WORKFLOW_ID} foo.1 + cylc__job__poll_grep_workflow_log 'Merged in' + fi + """ [[bar]] - script = """ -if (( CYLC_TASK_CYCLE_POINT == 3 )); then - cylc__job__poll_grep_workflow_log 'returning flow label(s)' -fi - """ + script = """ + if [[ $CYLC_TASK_JOB == *01 ]]; then + # job(01) + if (( CYLC_TASK_CYCLE_POINT == 3 )); then + test $CYLC_TASK_FLOWS == "1,2" + else + test $CYLC_TASK_FLOWS == "1" + fi + else + # job(02) + test $CYLC_TASK_FLOWS == "2" + fi + """ diff --git a/tests/functional/spawn-on-demand/02-merge/reference.log b/tests/functional/spawn-on-demand/02-merge/reference.log index 215c9298d95..fa9706f7c88 100644 --- a/tests/functional/spawn-on-demand/02-merge/reference.log +++ b/tests/functional/spawn-on-demand/02-merge/reference.log @@ -5,13 +5,8 @@ Final point: 3 [bar.1] -triggered off ['foo.1'] [foo.3] -triggered off ['foo.2'] [bar.2] -triggered off ['foo.2'] -[baz.1] -triggered off ['bar.1'] [foo.1] -triggered off ['foo.0'] -[baz.2] -triggered off ['bar.2'] [foo.2] -triggered off ['foo.1'] [bar.1] -triggered off ['foo.1'] [bar.2] -triggered off ['foo.2'] -[baz.1] -triggered off ['bar.1'] [bar.3] -triggered off ['foo.3'] -[baz.2] -triggered off ['bar.2'] -[baz.3] -triggered off ['bar.3'] diff --git a/tests/functional/spawn-on-demand/04-branch/flow.cylc b/tests/functional/spawn-on-demand/04-branch/flow.cylc index bc4d18bbf26..60df08deace 100644 --- a/tests/functional/spawn-on-demand/04-branch/flow.cylc +++ b/tests/functional/spawn-on-demand/04-branch/flow.cylc @@ -1,4 +1,4 @@ -# Check branching without SoS suicide triggers. +# Check SOD branching without suicide triggers. # Scheduler should shut down normally even though one branch does not run. 
[scheduler] allow implicit tasks = True diff --git a/tests/functional/spawn-on-demand/05-stop-flow/flow.cylc b/tests/functional/spawn-on-demand/05-stop-flow/flow.cylc index aa67ed1e369..a997b7a35b8 100644 --- a/tests/functional/spawn-on-demand/05-stop-flow/flow.cylc +++ b/tests/functional/spawn-on-demand/05-stop-flow/flow.cylc @@ -9,7 +9,6 @@ [runtime] [[bar]] script = """ -eval $(cylc cat-log -f j "${CYLC_WORKFLOW_ID}" foo.1 | grep CYLC_TASK_FLOW_LABEL) -cylc stop --flow=${CYLC_TASK_FLOW_LABEL} ${CYLC_WORKFLOW_ID} -cylc__job__poll_grep_workflow_log 'Command succeeded: stop' + cylc stop --flow=1 ${CYLC_WORKFLOW_ID} + cylc__job__poll_grep_workflow_log 'Command succeeded: stop' """ diff --git a/tests/functional/spawn-on-demand/06-stop-flow-2.t b/tests/functional/spawn-on-demand/06-stop-flow-2.t index b40f83a1cab..8e2c41c7bc2 100644 --- a/tests/functional/spawn-on-demand/06-stop-flow-2.t +++ b/tests/functional/spawn-on-demand/06-stop-flow-2.t @@ -16,7 +16,7 @@ # along with this program. If not, see . #------------------------------------------------------------------------------- -# Check that stopping the only flow causes the scheduler to shut down. +# Check that other flows can be stopped without affecting the main flow. . "$(dirname "$0")/test_header" set_test_number 2 diff --git a/tests/functional/spawn-on-demand/06-stop-flow-2/flow.cylc b/tests/functional/spawn-on-demand/06-stop-flow-2/flow.cylc index bb427b5158c..7ff895fe768 100644 --- a/tests/functional/spawn-on-demand/06-stop-flow-2/flow.cylc +++ b/tests/functional/spawn-on-demand/06-stop-flow-2/flow.cylc @@ -13,15 +13,14 @@ [[bar]] script = """ if (( CYLC_TASK_SUBMIT_NUMBER == 2 )); then - eval $(cylc cat-log -f j -s 2 "${CYLC_WORKFLOW_ID}" foo.1 | grep CYLC_TASK_FLOW_LABEL) - cylc stop --flow=${CYLC_TASK_FLOW_LABEL} ${CYLC_WORKFLOW_ID} - cylc__job__poll_grep_workflow_log "Command succeeded: stop(.*flow_label=$CYLC_TASK_FLOW_LABEL" + cylc stop --flow=1 ${CYLC_WORKFLOW_ID} + cylc__job__poll_grep_workflow_log "Command succeeded: stop" fi """ [[baz]] script = """ if (( CYLC_TASK_SUBMIT_NUMBER == 1 )); then - cylc trigger --reflow ${CYLC_WORKFLOW_ID} foo.1 - cylc__job__poll_grep_workflow_log "\[bar\.1\].*succeeded.*job(02)" + cylc trigger --reflow --meta=other ${CYLC_WORKFLOW_ID} foo.1 + cylc__job__poll_grep_workflow_log -E "bar\.1 running job:02.* => succeeded" fi """ diff --git a/tests/functional/spawn-on-demand/07-abs-triggers/flow.cylc b/tests/functional/spawn-on-demand/07-abs-triggers/flow.cylc index 4c76db21b09..2c2a77c4085 100644 --- a/tests/functional/spawn-on-demand/07-abs-triggers/flow.cylc +++ b/tests/functional/spawn-on-demand/07-abs-triggers/flow.cylc @@ -16,7 +16,7 @@ script = """ # Ensure that bar.1,2 are spawned by foo.1,2 and not by start.2 # (so the scheduler must update their prereqs when start.2 finishes). -cylc__job__poll_grep_workflow_log "spawned bar\.2" +cylc__job__poll_grep_workflow_log -E "bar\.2 .* spawned" """ [[foo]] [[bar]] diff --git a/tests/functional/spawn-on-demand/09-set-outputs/flow.cylc b/tests/functional/spawn-on-demand/09-set-outputs/flow.cylc index 6b4501daeac..f4122e049a8 100644 --- a/tests/functional/spawn-on-demand/09-set-outputs/flow.cylc +++ b/tests/functional/spawn-on-demand/09-set-outputs/flow.cylc @@ -16,7 +16,7 @@ foo & bar & setter # Task scripting below ensures that foo is still in the pool, but - # but bar is gone, when its outputs get set - just to make it clear + # bar is gone, when its outputs get set - just to make it clear # the target task doesn't have to exist. foo:out1? 
=> qux @@ -35,7 +35,7 @@ [[foo]] # Hang about until setter is finished. script = """ - cylc__job__poll_grep_workflow_log "\[setter\.1\].*succeeded" + cylc__job__poll_grep_workflow_log -E "setter\.1 .* => succeeded" """ [[bar]] script = true @@ -43,11 +43,11 @@ # (To the rescue). script = """ # Set foo outputs while it still exists in the pool. - cylc set-outputs --output=out1 --output=out2 "${CYLC_WORKFLOW_ID}" 1/foo + cylc set-outputs --flow=2 --output=out1 --output=out2 "${CYLC_WORKFLOW_ID}" 1/foo # Set bar outputs after it is gone from the pool. - cylc__job__poll_grep_workflow_log "\[bar\.1\] -task proxy removed" - cylc set-outputs --output=out1 --output=out2 "${CYLC_WORKFLOW_ID}" 1/bar + cylc__job__poll_grep_workflow_log -E "bar\.1 .*task proxy removed" + cylc set-outputs --flow=2 --output=out1 --output=out2 "${CYLC_WORKFLOW_ID}" 1/bar """ [[qux, quw, fux, fuw]] script = true diff --git a/tests/functional/spawn-on-demand/10-retrigger/flow.cylc b/tests/functional/spawn-on-demand/10-retrigger/flow.cylc index 92ffb983c02..9f226237c00 100644 --- a/tests/functional/spawn-on-demand/10-retrigger/flow.cylc +++ b/tests/functional/spawn-on-demand/10-retrigger/flow.cylc @@ -18,7 +18,7 @@ """ [[triggerer]] script = """ - cylc__job__poll_grep_workflow_log '\[oops\.1\].* (received)failed' + cylc__job__poll_grep_workflow_log -E 'oops\.1 running .* \(received\)failed' cylc trigger ${CYLC_WORKFLOW_ID} oops.1 """ [[foo, bar]] diff --git a/tests/functional/startup/00-state-summary.t b/tests/functional/startup/00-state-summary.t index d161a7ab609..5de22868a13 100644 --- a/tests/functional/startup/00-state-summary.t +++ b/tests/functional/startup/00-state-summary.t @@ -30,7 +30,7 @@ run_ok "${TEST_NAME}" cylc validate "${WORKFLOW_NAME}" cylc play --no-detach "${WORKFLOW_NAME}" > /dev/null 2>&1 # Restart with a failed task and a succeeded task. cylc play "${WORKFLOW_NAME}" -poll_grep_workflow_log -F '[foo.1] status=failed: (polled)failed' +poll_grep_workflow_log -E 'foo\.1 .* \(polled\)failed' cylc dump "${WORKFLOW_NAME}" > dump.out TEST_NAME=${TEST_NAME_BASE}-grep # State summary should not just say "Initializing..." diff --git a/tests/functional/triggering/19-and-suicide/flow.cylc b/tests/functional/triggering/19-and-suicide/flow.cylc index 151668bcff9..2335c54c0f3 100644 --- a/tests/functional/triggering/19-and-suicide/flow.cylc +++ b/tests/functional/triggering/19-and-suicide/flow.cylc @@ -16,7 +16,7 @@ [[t0]] # https://github.com/cylc/cylc-flow/issues/2655 # "t2.1" should not suicide on "t1.1:failed" - script = cylc__job__poll_grep_workflow_log '\[t1\.1\].* (received)failed' + script = cylc__job__poll_grep_workflow_log -E 't1\.1 .* \(received\)failed' [[t1]] script = false [[t2]] diff --git a/tests/functional/xtriggers/03-sequence.t b/tests/functional/xtriggers/03-sequence.t index 25d2ee59dc9..e6253179271 100644 --- a/tests/functional/xtriggers/03-sequence.t +++ b/tests/functional/xtriggers/03-sequence.t @@ -49,7 +49,7 @@ run_ok "${TEST_NAME_BASE}-val" cylc validate 'flow.cylc' # Run workflow; it will stall waiting on the never-satisfied xtriggers. 
cylc play "${WORKFLOW_NAME}" -poll_grep_workflow_log 'start.2025.*succeeded' +poll_grep_workflow_log -E 'start\.2025 .* => succeeded' cylc show "${WORKFLOW_NAME}" foo.2026 | grep -E '^ - xtrigger' > foo.2026.log diff --git a/tests/integration/test_data_store_mgr.py b/tests/integration/test_data_store_mgr.py index 994dc37d855..c4a576857c1 100644 --- a/tests/integration/test_data_store_mgr.py +++ b/tests/integration/test_data_store_mgr.py @@ -281,7 +281,7 @@ def test_delta_task_prerequisite(harness): schd.pool.force_spawn_children([ t.identity for t in schd.pool.get_all_tasks() - ], (TASK_STATUS_SUCCEEDED,)) + ], (TASK_STATUS_SUCCEEDED,), "flow1") assert all({ p.satisfied for t in schd.data_store_mgr.updated[TASK_PROXIES].values() diff --git a/tests/integration/test_resolvers.py b/tests/integration/test_resolvers.py index abbb1a5b23c..d95201b3248 100644 --- a/tests/integration/test_resolvers.py +++ b/tests/integration/test_resolvers.py @@ -199,7 +199,8 @@ async def test_nodes_mutator(mock_flow, flow_args): flow_args['workflows'].append((mock_flow.owner, mock_flow.name, None)) ids = [parse_node_id(n, TASK_PROXIES) for n in mock_flow.node_ids] response = await mock_flow.resolvers.nodes_mutator( - None, 'force_trigger_tasks', ids, flow_args, {} + None, 'force_trigger_tasks', ids, flow_args, + {"reflow": False, "flow_descr": ""} ) assert response[0]['id'] == mock_flow.id diff --git a/tests/unit/test_job_file.py b/tests/unit/test_job_file.py index 74fb886a9d5..8cf75a11940 100644 --- a/tests/unit/test_job_file.py +++ b/tests/unit/test_job_file.py @@ -88,7 +88,7 @@ def test_write(mocked_get_remote_workflow_run_dir, fixture_get_platform): 'duck': '~quack'}, "job_d": "1/baa/01", "try_num": 1, - "flow_label": "aZ", + "flow_nums": {1}, # "job_runner_name": "background", "param_var": {"duck": "quack", "mouse": "squeak"}, @@ -382,7 +382,7 @@ def test_write_task_environment(): 'CYLC_TASK_NAMESPACE_HIERARCHY="baa moo"\n export ' 'CYLC_TASK_DEPENDENCIES="moo neigh quack"\n export ' 'CYLC_TASK_TRY_NUMBER=1\n export ' - 'CYLC_TASK_FLOW_LABEL=aZ\n export ' + 'CYLC_TASK_FLOWS=1\n export ' 'CYLC_TASK_PARAM_duck="quack"\n export ' 'CYLC_TASK_PARAM_mouse="squeak"\n ' 'CYLC_TASK_WORK_DIR_BASE=\'farm_noises/work_d\'\n}') @@ -392,7 +392,7 @@ def test_write_task_environment(): "namespace_hierarchy": ["baa", "moo"], "dependencies": ['moo', 'neigh', 'quack'], "try_num": 1, - "flow_label": "aZ", + "flow_nums": {1}, "param_var": {"duck": "quack", "mouse": "squeak"}, "work_d": "farm_noises/work_d" diff --git a/tests/unit/test_xtrigger_mgr.py b/tests/unit/test_xtrigger_mgr.py index 7c717cef34a..1493f8cdd5a 100644 --- a/tests/unit/test_xtrigger_mgr.py +++ b/tests/unit/test_xtrigger_mgr.py @@ -19,7 +19,6 @@ from cylc.flow.cycling.iso8601 import ISO8601Point, ISO8601Sequence, init from cylc.flow.subprocctx import SubFuncContext from cylc.flow.task_proxy import TaskProxy -from cylc.flow.task_pool import FlowLabelMgr from cylc.flow.taskdef import TaskDef from cylc.flow.xtrigger_mgr import RE_STR_TMPL @@ -141,8 +140,7 @@ def test_housekeeping_with_xtrigger_satisfied(xtrigger_mgr): sequence = ISO8601Sequence('P1D', '2019') tdef.xtrig_labels[sequence] = ["get_name"] start_point = ISO8601Point('2019') - itask = TaskProxy( - tdef, start_point, FlowLabelMgr().get_new_label()) + itask = TaskProxy(tdef, start_point) # pretend the function has been activated xtrigger_mgr.active.append(xtrig.get_signature()) xtrigger_mgr.callback(xtrig) @@ -189,8 +187,7 @@ def test__call_xtriggers_async(xtrigger_mgr): init() start_point = 
ISO8601Point('2019') # create task proxy - itask = TaskProxy( - tdef, start_point, FlowLabelMgr().get_new_label()) + itask = TaskProxy(tdef, start_point) # we start with no satisfied xtriggers, and nothing active assert len(xtrigger_mgr.sat_xtrig) == 0 @@ -291,8 +288,7 @@ def test_check_xtriggers(xtrigger_mgr): sequence = ISO8601Sequence('P1D', '2019') tdef1.xtrig_labels[sequence] = ["get_name"] start_point = ISO8601Point('2019') - itask1 = TaskProxy( - tdef1, start_point, FlowLabelMgr().get_new_label()) + itask1 = TaskProxy(tdef1, start_point) itask1.state.xtriggers["get_name"] = False # satisfied? # add a clock xtrigger @@ -316,8 +312,7 @@ def test_check_xtriggers(xtrigger_mgr): init() start_point = ISO8601Point('20000101T0000+05') # create task proxy - itask2 = TaskProxy( - tdef2, start_point, FlowLabelMgr().get_new_label()) + TaskProxy(tdef2, start_point) xtrigger_mgr.check_xtriggers(itask1, lambda foo: None) # won't be satisfied, as it is async, we are are not calling callback From f6f15a0da3cfc5852e7b2d8e8a028fa73f06fc17 Mon Sep 17 00:00:00 2001 From: Hilary James Oliver Date: Mon, 13 Sep 2021 21:33:59 +1200 Subject: [PATCH 02/14] Post cherry-pick fix. --- cylc/flow/task_pool.py | 2 +- tests/functional/events/23-workflow-stalled-handler/flow.cylc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cylc/flow/task_pool.py b/cylc/flow/task_pool.py index 34197c5c704..5d1c10795b9 100644 --- a/cylc/flow/task_pool.py +++ b/cylc/flow/task_pool.py @@ -1249,7 +1249,7 @@ def spawn_on_all_outputs(self, itask): ) if c_task is not None: # already spawned - return + continue # Spawn child only if itask.flow_nums is not empty. c_task = self.spawn_task(c_name, c_point, itask.flow_nums) if c_task is not None: diff --git a/tests/functional/events/23-workflow-stalled-handler/flow.cylc b/tests/functional/events/23-workflow-stalled-handler/flow.cylc index 4cd521ed24e..a3eb23a9632 100644 --- a/tests/functional/events/23-workflow-stalled-handler/flow.cylc +++ b/tests/functional/events/23-workflow-stalled-handler/flow.cylc @@ -1,6 +1,6 @@ [scheduler] [[events]] - stall handler = "cylc set-outputs --flow=1 %(workflow)s bar.1" + stall handlers = "cylc set-outputs --flow=1 %(workflow)s bar.1" stall timeout = PT0S abort on stall timeout = False expected task failures = bar.1 From 4d3c8cd026206c9f2bc9de56305ea832841441f3 Mon Sep 17 00:00:00 2001 From: Hilary James Oliver Date: Tue, 14 Sep 2021 15:19:13 +1200 Subject: [PATCH 03/14] Get test coverage up a bit. 
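
The task_events_mgr hunk below also repairs a malformed LOG.critical call:
handler templates are expanded with %-interpolation, so an unknown
placeholder raises KeyError, which is now logged via LOG.error and the
handler skipped. A minimal sketch of the failure mode being guarded
(handler string and data invented for illustration):

    # %-interpolation against a dict raises KeyError for unknown keys
    handler = "notify.sh %(workflow)s %(no_such_key)s"
    handler_data = {"workflow": "my_flow"}
    try:
        cmd = handler % handler_data
    except KeyError as exc:
        print(f"bad template: {exc}")  # bad template: 'no_such_key'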
--- cylc/flow/task_events_mgr.py | 6 +-- tests/functional/cli/04-cli-error.t | 48 +++++++++++++++++++ tests/functional/cylc-kill/03-simulation.t | 42 ++++++++++++++++ .../cylc-kill/03-simulation/flow.cylc | 12 +++++ .../cylc-trigger/06-already-active.t | 32 +++++++++++++ .../cylc-trigger/06-already-active/flow.cylc | 22 +++++++++ .../restart/53-task-prerequisites/flow.cylc | 6 +-- tests/functional/runahead/06-release-update.t | 7 +-- .../spawn-on-demand/11-hold-not-spawned.t | 22 +++++++++ .../11-hold-not-spawned/flow.cylc | 20 ++++++++ .../11-hold-not-spawned/reference.log | 4 ++ .../functional/triggering/18-suicide-active.t | 34 +++++++++++++ .../triggering/18-suicide-active/flow.cylc | 11 +++++ .../20-and-outputs-suicide/flow.cylc | 1 - 14 files changed, 254 insertions(+), 13 deletions(-) create mode 100644 tests/functional/cli/04-cli-error.t create mode 100755 tests/functional/cylc-kill/03-simulation.t create mode 100644 tests/functional/cylc-kill/03-simulation/flow.cylc create mode 100644 tests/functional/cylc-trigger/06-already-active.t create mode 100644 tests/functional/cylc-trigger/06-already-active/flow.cylc create mode 100644 tests/functional/spawn-on-demand/11-hold-not-spawned.t create mode 100644 tests/functional/spawn-on-demand/11-hold-not-spawned/flow.cylc create mode 100644 tests/functional/spawn-on-demand/11-hold-not-spawned/reference.log create mode 100644 tests/functional/triggering/18-suicide-active.t create mode 100644 tests/functional/triggering/18-suicide-active/flow.cylc diff --git a/cylc/flow/task_events_mgr.py b/cylc/flow/task_events_mgr.py index 47eb20e4561..130b17a0a68 100644 --- a/cylc/flow/task_events_mgr.py +++ b/cylc/flow/task_events_mgr.py @@ -1185,11 +1185,9 @@ def _setup_custom_event_handlers(self, itask, event, message): # fmt: on cmd = handler % (handler_data) except KeyError as exc: - LOG.critical( - itask.point, itask.tdef.name, itask.submit_num, + LOG.error( f"{itask.point}/{itask.tdef.name}/{itask.submit_num:02d} " - f"{key1} bad template: {exc}" - ) + f"{key1} bad template: {exc}") continue if cmd == handler: diff --git a/tests/functional/cli/04-cli-error.t b/tests/functional/cli/04-cli-error.t new file mode 100644 index 00000000000..8f0fcaab77d --- /dev/null +++ b/tests/functional/cli/04-cli-error.t @@ -0,0 +1,48 @@ +#!/usr/bin/env bash +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. +# Copyright (C) NIWA & British Crown (Met Office) & Contributors. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +#------------------------------------------------------------------------------- + +# Get coverage up for some CLI parser errors. + +. "$(dirname "$0")/test_header" +set_test_number 4 + +init_workflow "${TEST_NAME_BASE}" << __CONFIG__ +[scheduling] + [[graph]] + R1 = foo +[runtime] + [[foo]] +__CONFIG__ + +# "cylc set-outputs" requires a flow number. 
+TEST_NAME="set-outputs-fail" +run_fail "${TEST_NAME}" \ + cylc set-outputs "${WORKFLOW_NAME}" foo.1 +contains_ok "${TEST_NAME}".stderr <<__END__ +cylc: error: --flow=FLOW is required. +__END__ + +# "cylc trigger --meta" requires --reflow +TEST_NAME="set-trigger-fail" +run_fail "${TEST_NAME}" \ + cylc trigger --meta="the quick brown" "${WORKFLOW_NAME}" foo.1 +contains_ok "${TEST_NAME}".stderr <<__END__ +cylc: error: --meta requires --reflow +__END__ + +purge diff --git a/tests/functional/cylc-kill/03-simulation.t b/tests/functional/cylc-kill/03-simulation.t new file mode 100755 index 00000000000..4f6a03875eb --- /dev/null +++ b/tests/functional/cylc-kill/03-simulation.t @@ -0,0 +1,42 @@ +#!/usr/bin/env bash +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. +# Copyright (C) NIWA & British Crown (Met Office) & Contributors. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# Test kill a running simulation job + +. "$(dirname "$0")/test_header" + +set_test_number 3 +install_workflow "${TEST_NAME_BASE}" "${TEST_NAME_BASE}" + +run_ok "${TEST_NAME_BASE}-validate" cylc validate "${WORKFLOW_NAME}" + +# run workflow in background +cylc play --debug -m simulation "${WORKFLOW_NAME}" >/dev/null 2>&1 + +# wait for simulated job start +poll_grep_workflow_log "foo.1 .* running" -E + +# kill it +run_ok killer cylc kill $WORKFLOW_NAME foo.1 + +# wait for shut down +poll_grep_workflow_log "INFO - DONE" + +# check the sim job was kiled +grep_workflow_log_ok killed "foo.1 .* failed" -E + +purge diff --git a/tests/functional/cylc-kill/03-simulation/flow.cylc b/tests/functional/cylc-kill/03-simulation/flow.cylc new file mode 100644 index 00000000000..03b6249e962 --- /dev/null +++ b/tests/functional/cylc-kill/03-simulation/flow.cylc @@ -0,0 +1,12 @@ +[scheduler] + [[events]] + inactivity timeout = PT20S + abort on inactivity timeout = True +[scheduling] + [[graph]] + R1 = "foo?" +[runtime] + [[root]] + [[[simulation]]] + default run length = PT30S + [[foo]] diff --git a/tests/functional/cylc-trigger/06-already-active.t b/tests/functional/cylc-trigger/06-already-active.t new file mode 100644 index 00000000000..dfc4a0da453 --- /dev/null +++ b/tests/functional/cylc-trigger/06-already-active.t @@ -0,0 +1,32 @@ +#!/usr/bin/env bash +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. +# Copyright (C) NIWA & British Crown (Met Office) & Contributors. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+#------------------------------------------------------------------------------- + +# Test triggering an already-active task just generates a warning. + +. "$(dirname "$0")/test_header" + +set_test_number 2 + +install_workflow "${TEST_NAME_BASE}" "${TEST_NAME_BASE}" + +run_ok "${TEST_NAME_BASE}-validate" cylc validate "${WORKFLOW_NAME}" + +workflow_run_ok "${TEST_NAME_BASE}-run" \ + cylc play --debug -n "${WORKFLOW_NAME}" + +purge diff --git a/tests/functional/cylc-trigger/06-already-active/flow.cylc b/tests/functional/cylc-trigger/06-already-active/flow.cylc new file mode 100644 index 00000000000..6755cb47998 --- /dev/null +++ b/tests/functional/cylc-trigger/06-already-active/flow.cylc @@ -0,0 +1,22 @@ +# test triggering an already active task +[scheduler] + [[events]] + inactivity timeout = PT20S + abort on inactivity timeout = True +[scheduling] + [[graph]] + R1 = "triggeree:start & triggerer" +[runtime] + [[triggerer]] + script = """ + cylc__job__poll_grep_workflow_log "triggeree\.1 .* running" -E + cylc trigger $CYLC_WORKFLOW_NAME triggeree.1 + cylc__job__poll_grep_workflow_log \ + "triggeree\.1 .* ignoring trigger - already active" -E + """ + [[triggeree]] + script = """ + cylc__job__poll_grep_workflow_log \ + "triggeree\.1 .* ignoring trigger - already active" -E + """ + diff --git a/tests/functional/restart/53-task-prerequisites/flow.cylc b/tests/functional/restart/53-task-prerequisites/flow.cylc index 3a1caa48143..8055c305cac 100644 --- a/tests/functional/restart/53-task-prerequisites/flow.cylc +++ b/tests/functional/restart/53-task-prerequisites/flow.cylc @@ -15,11 +15,7 @@ [runtime] [[foo]] script = """ - if [[ "$CYLC_TASK_JOB" == '1/foo/01' ]]; then - false - else - true - fi + [[ "$CYLC_TASK_JOB" != '1/foo/01' ]] """ [[apollo]] script = cylc message -- "The Eagle has landed" diff --git a/tests/functional/runahead/06-release-update.t b/tests/functional/runahead/06-release-update.t index 6f9d0475781..3daaca6664a 100644 --- a/tests/functional/runahead/06-release-update.t +++ b/tests/functional/runahead/06-release-update.t @@ -32,10 +32,11 @@ poll_grep_workflow_log -E "bar\.${NEXT1} .* spawned" # sleep a little to allow the datastore to update (`cylc dump` sees the # datastore) TODO can we avoid this flaky sleep somehow? sleep 10 -cylc dump -t "${WORKFLOW_NAME}" | awk '{print $1 $2 $3}' >'log' +# (gratuitous use of --flows for test coverage) +cylc dump --flows -t "${WORKFLOW_NAME}" | awk '{print $1 $2 $3 $7}' >'log' cmp_ok 'log' - <<__END__ -bar,$NEXT1,waiting, -foo,$NEXT1,waiting, +bar,$NEXT1,waiting,[1] +foo,$NEXT1,waiting,[1] __END__ run_ok "${TEST_NAME_BASE}-stop" \ diff --git a/tests/functional/spawn-on-demand/11-hold-not-spawned.t b/tests/functional/spawn-on-demand/11-hold-not-spawned.t new file mode 100644 index 00000000000..8c620f693dc --- /dev/null +++ b/tests/functional/spawn-on-demand/11-hold-not-spawned.t @@ -0,0 +1,22 @@ +#!/usr/bin/env bash +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. +# Copyright (C) NIWA & British Crown (Met Office) & Contributors. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +#------------------------------------------------------------------------------- +# Test we can hold a task that hasn't spawned yet. +. "$(dirname "$0")/test_header" +set_test_number 2 +reftest +exit diff --git a/tests/functional/spawn-on-demand/11-hold-not-spawned/flow.cylc b/tests/functional/spawn-on-demand/11-hold-not-spawned/flow.cylc new file mode 100644 index 00000000000..fd77a4ae2c2 --- /dev/null +++ b/tests/functional/spawn-on-demand/11-hold-not-spawned/flow.cylc @@ -0,0 +1,20 @@ +# Test holding a task that hasn't spawned yet. +[scheduler] + [[events]] + inactivity timeout = PT20S + abort on inactivity timeout = True +[scheduling] + [[graph]] + R1 = "holder => holdee & stopper" +[runtime] + [[holder]] + script = """ + cylc hold $CYLC_WORKFLOW_NAME holdee.1 + """ + [[holdee]] + script = true + [[stopper]] + script = """ + cylc__job__poll_grep_workflow_log "\[holdee\.1 .* holding \(as requested earlier\)" -E + cylc stop $CYLC_WORKFLOW_NAME + """ diff --git a/tests/functional/spawn-on-demand/11-hold-not-spawned/reference.log b/tests/functional/spawn-on-demand/11-hold-not-spawned/reference.log new file mode 100644 index 00000000000..01b08ac7437 --- /dev/null +++ b/tests/functional/spawn-on-demand/11-hold-not-spawned/reference.log @@ -0,0 +1,4 @@ +Initial point: 1 +Final point: 1 +[holder.1] -triggered off [] +[stopper.1] -triggered off ['holder.1'] diff --git a/tests/functional/triggering/18-suicide-active.t b/tests/functional/triggering/18-suicide-active.t new file mode 100644 index 00000000000..dbc53eecb44 --- /dev/null +++ b/tests/functional/triggering/18-suicide-active.t @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. +# Copyright (C) NIWA & British Crown (Met Office) & Contributors. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +#------------------------------------------------------------------------------- + +# Test warning for "suiciding while active" + +. 
"$(dirname "$0")/test_header" + +set_test_number 3 + +install_workflow "${TEST_NAME_BASE}" "${TEST_NAME_BASE}" + +run_ok "${TEST_NAME_BASE}-validate" cylc validate "${WORKFLOW_NAME}" + +workflow_run_ok "${TEST_NAME_BASE}-run" \ + cylc play --debug -n "${WORKFLOW_NAME}" + +grep_workflow_log_ok "${TEST_NAME_BASE}-grep" "suiciding while active" + +purge diff --git a/tests/functional/triggering/18-suicide-active/flow.cylc b/tests/functional/triggering/18-suicide-active/flow.cylc new file mode 100644 index 00000000000..a76b82a15e6 --- /dev/null +++ b/tests/functional/triggering/18-suicide-active/flow.cylc @@ -0,0 +1,11 @@ +# test "suiciding while active" warning +[scheduler] + [[events]] + inactivity timeout = PT20S + abort on inactivity timeout = True +[scheduling] + [[graph]] + R1 = "foo:start => !foo" +[runtime] + [[foo]] + script = sleep 10 diff --git a/tests/functional/triggering/20-and-outputs-suicide/flow.cylc b/tests/functional/triggering/20-and-outputs-suicide/flow.cylc index f2120d9711d..1d58bcdc236 100644 --- a/tests/functional/triggering/20-and-outputs-suicide/flow.cylc +++ b/tests/functional/triggering/20-and-outputs-suicide/flow.cylc @@ -1,4 +1,3 @@ -# NOTE this is an explicit test of suicide triggers, not very useful under SoD? [scheduler] [[events]] abort on stall timeout = True From 953f5ee922e827448c192209d76b60b98fc56542 Mon Sep 17 00:00:00 2001 From: Hilary James Oliver Date: Wed, 15 Sep 2021 10:19:47 +1200 Subject: [PATCH 04/14] Tweak new tests. --- cylc/flow/flow_mgr.py | 28 +++++---- tests/functional/cylc-kill/03-simulation.t | 2 +- tests/unit/test_flow_mgr.py | 66 ++++++++++++++++++++++ 3 files changed, 83 insertions(+), 13 deletions(-) create mode 100644 tests/unit/test_flow_mgr.py diff --git a/cylc/flow/flow_mgr.py b/cylc/flow/flow_mgr.py index e16e53f228c..380c1f436bc 100644 --- a/cylc/flow/flow_mgr.py +++ b/cylc/flow/flow_mgr.py @@ -17,7 +17,7 @@ """Manage flow counter and flow metadata.""" from typing import Dict, Set, TYPE_CHECKING -from datetime import datetime, timedelta +import datetime from cylc.flow import LOG @@ -38,8 +38,9 @@ def get_new_flow(self, description: str = "no description") -> int: """Increment flow counter, record flow metadata.""" self.counter += 1 # record start time to nearest second - now = datetime.now() - now_sec: str = str(now - timedelta(microseconds=now.microsecond)) + now = datetime.datetime.now() + now_sec: str = str( + now - datetime.timedelta(microseconds=now.microsecond)) self.flows[self.counter] = { "description": description, "start_time": now_sec @@ -54,19 +55,22 @@ def get_new_flow(self, description: str = "no description") -> int: self.flows[self.counter] ) self.db_mgr.put_workflow_params_1("flow_counter", self.counter) - self.dump_to_log() return self.counter def load_flows_db(self, flow_nums: Set[int]) -> None: """Load metadata for selected flows from DB - on restart.""" self.flows = self.db_mgr.pri_dao.select_workflow_flows(flow_nums) - self.dump_to_log() + self._log() - def dump_to_log(self) -> None: - """Dump current flow info to log.""" - for f in self.flows: - LOG.info( - f"flow: {f}: " - f"({self.flows[f]['description']}) " - f"{self.flows[f]['start_time']} " + def _log(self) -> None: + """Write current flow info to log.""" + LOG.info( + "Flows:\n" + "\n".join( + ( + f"flow: {f} " + f"({self.flows[f]['description']}) " + f"{self.flows[f]['start_time']}" + ) + for f in self.flows ) + ) diff --git a/tests/functional/cylc-kill/03-simulation.t b/tests/functional/cylc-kill/03-simulation.t index 4f6a03875eb..81d44eb6109 
100755 --- a/tests/functional/cylc-kill/03-simulation.t +++ b/tests/functional/cylc-kill/03-simulation.t @@ -31,7 +31,7 @@ cylc play --debug -m simulation "${WORKFLOW_NAME}" >/dev/null 2>&1 poll_grep_workflow_log "foo.1 .* running" -E # kill it -run_ok killer cylc kill $WORKFLOW_NAME foo.1 +run_ok killer cylc kill "${WORKFLOW_NAME}" foo.1 # wait for shut down poll_grep_workflow_log "INFO - DONE" diff --git a/tests/unit/test_flow_mgr.py b/tests/unit/test_flow_mgr.py new file mode 100644 index 00000000000..765fdf0c522 --- /dev/null +++ b/tests/unit/test_flow_mgr.py @@ -0,0 +1,66 @@ +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. +# Copyright (C) NIWA & British Crown (Met Office) & Contributors. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +"""Unit tests for FlowManager.""" + +import pytest +import datetime +import logging + +from cylc.flow.flow_mgr import FlowMgr +from cylc.flow.workflow_db_mgr import WorkflowDatabaseManager + + +FAKE_NOW = datetime.datetime(2020, 12, 25, 17, 5, 55) + + +@pytest.fixture +def patch_datetime_now(monkeypatch): + + class mydatetime: + @classmethod + def now(cls): + return FAKE_NOW + + monkeypatch.setattr(datetime, 'datetime', mydatetime) + + +def test_all( + patch_datetime_now, + caplog: pytest.LogCaptureFixture, +): + db_mgr = WorkflowDatabaseManager() + flow_mgr = FlowMgr(db_mgr) + caplog.set_level(logging.INFO) + + count = 1 + meta = "the quick brown fox" + msg1 = f"flow: {count} ({meta}) {FAKE_NOW}" + assert flow_mgr.get_new_flow(meta) == count + assert f"New {msg1}" in caplog.messages + + count = 2 + meta = "jumped over the lazy" + msg2 = f"flow: {count} ({meta}) {FAKE_NOW}" + assert flow_mgr.get_new_flow(meta) == count + assert f"New {msg2}" in caplog.messages + + flow_mgr._log() + assert ( + "Flows:\n" + f"{msg1}\n" + f"{msg2}" + ) in caplog.messages From 5660eac1676a77973fbe77fefccba45623b1614f Mon Sep 17 00:00:00 2001 From: Hilary James Oliver Date: Wed, 15 Sep 2021 15:30:05 +1200 Subject: [PATCH 05/14] Add multi-flow restart func test. 
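
The restart test added below asserts on "flows:2" job-log prefixes and on
$CYLC_TASK_FLOWS, which, going by the other tests in this branch, is the
comma-joined, sorted rendering of a task's flow numbers. A rough sketch of
that rendering (helper name invented, not the real job-file code):

    def serialise_flow_nums(flow_nums):
        # e.g. {2, 1} -> "1,2", the form tested via $CYLC_TASK_FLOWS
        return ",".join(str(n) for n in sorted(flow_nums))

    assert serialise_flow_nums({2, 1}) == "1,2"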
--- tests/functional/lib/bash/test_header | 30 +++++++++---- tests/functional/restart/50-two-flows.t | 34 +++++++++++++++ .../functional/restart/50-two-flows/flow.cylc | 28 +++++++++++++++ .../restart/50-two-flows/reference.log | 6 ++++ .../50-two-flows/reference.restart.log | 6 ++++ 5 files changed, 99 insertions(+), 5 deletions(-) create mode 100644 tests/functional/restart/50-two-flows.t create mode 100644 tests/functional/restart/50-two-flows/flow.cylc create mode 100644 tests/functional/restart/50-two-flows/reference.log create mode 100644 tests/functional/restart/50-two-flows/reference.restart.log diff --git a/tests/functional/lib/bash/test_header b/tests/functional/lib/bash/test_header index 9e47336d494..bf3c5f88a48 100644 --- a/tests/functional/lib/bash/test_header +++ b/tests/functional/lib/bash/test_header @@ -129,7 +129,12 @@ # Install a reference workflow using `install_workflow`, run a validation # test on the workflow and run the reference workflow with `workflow_run_ok`. # Expect 2 OK tests. -# +# install_and_validate +# The first part of reftest, to allow separate use. +# Expect 1 OK test. +# reftest_run +# The guts of reftest, to allow separate use. +# Expect 1 OK test. # create_test_global_config [PRE [POST]] # Create a new global config file $PWD/etc from global-tests.cylc # with PRE and POST pre- and ap-pended (PRE for e.g. jinja2 shebang). @@ -754,19 +759,34 @@ mock_smtpd_kill() { # Logic borrowed from Rose fi } -reftest() { - local TEST_NAME="${1:-${TEST_NAME_BASE}}" +install_and_validate() { + # First part of the reftest function, to allow separate use. + # Expect 1 OK test. + local TEST_NAME="${1:-${TEST_NAME_BASE}}" install_workflow "$@" run_ok "${TEST_NAME}-validate" cylc validate "${WORKFLOW_NAME}" +} + +reftest_run() { + # Guts of the reftest function, to allow separate use. + # Expect 1 OK test. + local TEST_NAME="${1:-${TEST_NAME_BASE}}-run" if [[ -n "${REFTEST_OPTS:-}" ]]; then - workflow_run_ok "${TEST_NAME}-run" \ + workflow_run_ok "${TEST_NAME}" \ cylc play --reference-test --debug --no-detach \ "${REFTEST_OPTS}" "${WORKFLOW_NAME}" else - workflow_run_ok "${TEST_NAME}-run" \ + workflow_run_ok "${TEST_NAME}" \ cylc play --reference-test --debug --no-detach \ "${WORKFLOW_NAME}" fi +} + +reftest() { + # Install, validate, run, and purge a reference test. + # Expect 2 OK tests. + install_and_validate "$@" + reftest_run "$@" # shellcheck disable=SC2119 purge } diff --git a/tests/functional/restart/50-two-flows.t b/tests/functional/restart/50-two-flows.t new file mode 100644 index 00000000000..4eb4bbd62ce --- /dev/null +++ b/tests/functional/restart/50-two-flows.t @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. +# Copyright (C) NIWA & British Crown (Met Office) & Contributors. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see .
+#------------------------------------------------------------------------------- +# Test restart with two active flows present. + +. "$(dirname "$0")/test_header" +set_test_number 5 + +# first run reference test +install_and_validate +reftest_run + +# restart reference test +mv "${WORKFLOW_RUN_DIR}/reference.restart.log" "${WORKFLOW_RUN_DIR}/reference.log" +reftest_run + +grep_workflow_log_ok flow-1 "flow: 1 (original from 1)" +grep_workflow_log_ok flow-2 "flow: 2 (cheese wizard)" + +purge diff --git a/tests/functional/restart/50-two-flows/flow.cylc b/tests/functional/restart/50-two-flows/flow.cylc new file mode 100644 index 00000000000..0536767a3a9 --- /dev/null +++ b/tests/functional/restart/50-two-flows/flow.cylc @@ -0,0 +1,28 @@ +# A workflow that triggers a new flow in the graph and then shuts down +# so that we can restart with two active flows present. + +[scheduler] + [[events]] + inactivity timeout = PT20S + abort on inactivity timeout = True +[scheduling] + [[graph]] + R1 = "a => b => c => d" +[runtime] + [[root]] + pre-script = sleep 2 + [[a]] + script = """ + if ((CYLC_TASK_FLOWS == 2)); then + cylc__job__poll_grep_workflow_log "\[c\.1 .* succeeded" + fi + """ + [[b, d]] + [[c]] + script = """ + if ((CYLC_TASK_FLOWS == 1)); then + cylc trigger --reflow --meta="cheese wizard" $CYLC_WORKFLOW_NAME a.1 + cylc__job__poll_grep_workflow_log "\[a\.1 submitted job:02 flows:2\] => running" + cylc stop $CYLC_WORKFLOW_NAME + fi + """ diff --git a/tests/functional/restart/50-two-flows/reference.log b/tests/functional/restart/50-two-flows/reference.log new file mode 100644 index 00000000000..639b3abb762 --- /dev/null +++ b/tests/functional/restart/50-two-flows/reference.log @@ -0,0 +1,6 @@ +Initial point: 1 +Final point: 1 +[a.1] -triggered off [] +[b.1] -triggered off ['a.1'] +[c.1] -triggered off ['b.1'] +[a.1] -triggered off [] diff --git a/tests/functional/restart/50-two-flows/reference.restart.log b/tests/functional/restart/50-two-flows/reference.restart.log new file mode 100644 index 00000000000..a7045c2406c --- /dev/null +++ b/tests/functional/restart/50-two-flows/reference.restart.log @@ -0,0 +1,6 @@ +Initial point: 1 +Final point: 1 +[d.1] -triggered off ['c.1'] +[b.1] -triggered off ['a.1'] +[c.1] -triggered off ['b.1'] +[d.1] -triggered off ['c.1'] From 345684100d68a9e91dfcc64897da669e090df5ff Mon Sep 17 00:00:00 2001 From: Hilary James Oliver Date: Wed, 15 Sep 2021 15:40:55 +1200 Subject: [PATCH 06/14] Fix change log. --- CHANGES.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 47e35465578..4643a26b451 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -172,9 +172,6 @@ Add an option for displaying source workflows in `cylc scan`. [#4300](https://github.com/cylc/cylc-flow/pull/4300) - Integer flow labels with flow metadata, and improved task logging. -[#4291](https://github.com/cylc/cylc-flow/pull/4291) - - Remove obsolete `cylc edit` and `cylc search` commands. - [#4291](https://github.com/cylc/cylc-flow/pull/4291) - Remove obsolete `cylc edit` and `cylc search` commands. From 38b8f98e2758378db890b6c36208b51be4092d84 Mon Sep 17 00:00:00 2001 From: Hilary James Oliver Date: Wed, 13 Oct 2021 18:52:41 +1300 Subject: [PATCH 07/14] Address PR review comments. 
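
For context on the flow_mgr tweaks below: each new flow takes the next
integer and a metadata record holding a description and a start time
truncated to whole seconds. A standalone sketch of that record shape (not
the real class, which also persists the record to the run database):

    import datetime

    flows = {}
    counter = 0

    def get_new_flow(description="no description"):
        global counter
        counter += 1
        now = datetime.datetime.now()
        # drop sub-second precision, as the manager does
        start = now - datetime.timedelta(microseconds=now.microsecond)
        flows[counter] = {"description": description, "start_time": str(start)}
        return counter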
--- cylc/flow/flow_mgr.py | 10 +++++----- cylc/flow/scripts/set_outputs.py | 3 +-- cylc/flow/task_job_mgr.py | 3 ++- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/cylc/flow/flow_mgr.py b/cylc/flow/flow_mgr.py index 380c1f436bc..9a98f422944 100644 --- a/cylc/flow/flow_mgr.py +++ b/cylc/flow/flow_mgr.py @@ -16,13 +16,11 @@ """Manage flow counter and flow metadata.""" -from typing import Dict, Set, TYPE_CHECKING +from typing import Dict, Set import datetime from cylc.flow import LOG - -if TYPE_CHECKING: - from cylc.flow.workflow_db_mgr import WorkflowDatabaseManager +from cylc.flow.workflow_db_mgr import WorkflowDatabaseManager class FlowMgr: @@ -54,7 +52,9 @@ def get_new_flow(self, description: str = "no description") -> int: self.counter, self.flows[self.counter] ) - self.db_mgr.put_workflow_params_1("flow_counter", self.counter) + self.db_mgr.put_workflow_params_1( + WorkflowDatabaseManager.KEY_FLOW_COUNTER, + self.counter) return self.counter def load_flows_db(self, flow_nums: Set[int]) -> None: diff --git a/cylc/flow/scripts/set_outputs.py b/cylc/flow/scripts/set_outputs.py index a5dac1a2262..28c15c6e73c 100755 --- a/cylc/flow/scripts/set_outputs.py +++ b/cylc/flow/scripts/set_outputs.py @@ -18,8 +18,7 @@ """cylc set-outputs [OPTIONS] ARGS -Tell the scheduler that specified (or "succeeded", by default) outputs -of tasks are complete. +Set specified task outputs ("succeeded" by default) to complete. Downstream tasks will be spawned or updated just as if the outputs were completed normally. diff --git a/cylc/flow/task_job_mgr.py b/cylc/flow/task_job_mgr.py index 052dc72a175..861a64b9503 100644 --- a/cylc/flow/task_job_mgr.py +++ b/cylc/flow/task_job_mgr.py @@ -775,9 +775,10 @@ def _manip_task_jobs_callback( itask = tasks[(point, name, submit_num)] callback(workflow, itask, ctx, line) except (LookupError, ValueError) as exc: + # (Note this catches KeyError too). LOG.warning( 'Unhandled %s output: %s', ctx.cmd_key, line) - LOG.exception(exc) + LOG.warning(str(exc)) # Task jobs that are in the original command but did not get a status # in the output. Handle as failures. for key, itask in sorted(bad_tasks.items()): From dbfc2d5411fc8223964ccf1cebefe93401966d18 Mon Sep 17 00:00:00 2001 From: Hilary James Oliver Date: Wed, 13 Oct 2021 19:10:48 +1300 Subject: [PATCH 08/14] Add nosec comment. --- cylc/flow/rundb.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/cylc/flow/rundb.py b/cylc/flow/rundb.py index 96443187485..074be929cb3 100644 --- a/cylc/flow/rundb.py +++ b/cylc/flow/rundb.py @@ -511,11 +511,14 @@ def select_workflow_params(self, callback): def select_workflow_flows(self, flow_nums): """Return flow data for selected flows.""" - stmt = ( - f"SELECT flow_num, start_time, description " - f"FROM {self.TABLE_WORKFLOW_FLOWS} " - f"WHERE flow_num in ({','.join(str(f) for f in flow_nums)})" - ) + stmt = rf''' + SELECT + flow_num, start_time, description + FROM + {self.TABLE_WORKFLOW_FLOWS} + WHERE + flow_num in ({','.join(str(f) for f in flow_nums)}) + ''' # nosec (table name is code constant, flow_nums just integers) flows = {} for flow_num, start_time, descr in self.connect().execute(stmt): flows[flow_num] = { From 4c4c455311b3efd70bf9cfe7792796078adf3be3 Mon Sep 17 00:00:00 2001 From: Hilary James Oliver Date: Thu, 14 Oct 2021 10:10:53 +1300 Subject: [PATCH 09/14] Fix cylc poll CLI doc. 
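
On the scheduler side (below), command_poll_tasks filters the task pool
against the requested names, hands the matches to the job manager, and
returns the count of unmatched items. A stand-in sketch of that filter
over plain "name.point" strings (the real filter_task_proxies works on
TaskProxy objects with cylc's own matching rules):

    from fnmatch import fnmatchcase

    def filter_task_proxies(pool, items):
        itasks = [i for i in pool if any(fnmatchcase(i, g) for g in items)]
        bad = [g for g in items if not any(fnmatchcase(i, g) for i in pool)]
        return itasks, bad

    itasks, bad = filter_task_proxies(["foo.1", "bar.1"], ["foo.*", "qux.*"])
    assert itasks == ["foo.1"] and bad == ["qux.*"]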
--- cylc/flow/scheduler.py | 3 ++- cylc/flow/scripts/poll.py | 11 ++++++++--- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/cylc/flow/scheduler.py b/cylc/flow/scheduler.py index 8af46d50e2a..32940bd60e6 100644 --- a/cylc/flow/scheduler.py +++ b/cylc/flow/scheduler.py @@ -938,11 +938,12 @@ def command_resume(self) -> None: self.resume_workflow() def command_poll_tasks(self, items=None): - """Poll pollable tasks or a task/family if options are provided.""" + """Poll pollable tasks or a task or family if options are provided.""" if self.config.run_mode('simulation'): return itasks, bad_items = self.pool.filter_task_proxies(items) self.task_job_mgr.poll_task_jobs(self.workflow, itasks) + # (Could filter itasks by state here if needed) return len(bad_items) def command_kill_tasks(self, items=None): diff --git a/cylc/flow/scripts/poll.py b/cylc/flow/scripts/poll.py index e9378ad4055..92f0cea55ee 100755 --- a/cylc/flow/scripts/poll.py +++ b/cylc/flow/scripts/poll.py @@ -18,11 +18,16 @@ """cylc poll [OPTIONS] ARGS -Poll (query) task jobs to verify and update their statuses. +Poll pollable task jobs to verify and update their statuses in the scheduler. + +This checks the job status file and queries the job runner on the job platform. + +Pollable tasks are those in the n=0 window with an associated job ID, including +incomplete finished tasks. Examples: - $ cylc poll WORKFLOW # poll all active tasks - $ cylc poll WORKFLOW TASK_GLOB # poll multiple active tasks or families + $ cylc poll WORKFLOW # poll all pollable tasks + $ cylc poll WORKFLOW TASK_GLOB # poll multiple pollable tasks or families """ from typing import TYPE_CHECKING From 1bdb0e1b1e87063f8abfdda8d20919a782373221 Mon Sep 17 00:00:00 2001 From: Hilary James Oliver Date: Thu, 14 Oct 2021 16:17:41 +1300 Subject: [PATCH 10/14] Fix a func test post rebase. --- .../intelligent-host-selection/05-from-platform-group.t | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/functional/intelligent-host-selection/05-from-platform-group.t b/tests/functional/intelligent-host-selection/05-from-platform-group.t index 40760e346d7..ccb17afcfee 100644 --- a/tests/functional/intelligent-host-selection/05-from-platform-group.t +++ b/tests/functional/intelligent-host-selection/05-from-platform-group.t @@ -70,7 +70,7 @@ named_grep_ok "job submit fails for badhostplatform" "badhostplatform: Tried all "${WORKFLOW_RUN_DIR}/log/workflow/log" named_grep_ok "job submit fails for unreachable_host" "\"jobs-submit\" failed.*\"bad_host1\"" \ "${WORKFLOW_RUN_DIR}/log/workflow/log" -named_grep_ok "job submit _finally_ works" "[ugly.1].*preparing => submitted" \ +named_grep_ok "job submit _finally_ works" "\[ugly\.1 preparing job:01 flows:1\] => submitted" \ "${WORKFLOW_RUN_DIR}/log/workflow/log" purge From 8f920ce7d4cbe8b21d7927c3e53f9bfb03019f5f Mon Sep 17 00:00:00 2001 From: Hilary James Oliver Date: Fri, 15 Oct 2021 15:02:35 +1300 Subject: [PATCH 11/14] Set flow counter from DB. 
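
The flow counter is now seeded from MAX(flow_num) at restart rather than
from a workflow_params entry. An equivalent standalone query against a run
database, assuming TABLE_WORKFLOW_FLOWS is the literal table name
"workflow_flows" (path illustrative; note MAX() returns NULL, i.e. None,
on an empty table, so a guard is sensible):

    import sqlite3
    from pathlib import Path

    db = Path("~/cylc-run/my-flow/log/db").expanduser()  # illustrative path
    with sqlite3.connect(str(db)) as conn:
        row = conn.execute(
            "SELECT MAX(flow_num) FROM workflow_flows"  # table name assumed
        ).fetchone()
    counter = row[0] or 0  # None -> 0 for a database with no flows yet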
--- cylc/flow/flow_mgr.py | 19 +++++++++++-------- cylc/flow/rundb.py | 10 ++++++++++ cylc/flow/scheduler.py | 8 ++------ cylc/flow/task_pool.py | 2 +- cylc/flow/workflow_db_mgr.py | 1 - tests/flakyfunctional/database/00-simple.t | 1 - .../01-job-nn-localhost/db.sqlite3 | 1 - tests/functional/restart/50-two-flows.t | 2 +- 8 files changed, 25 insertions(+), 19 deletions(-) diff --git a/cylc/flow/flow_mgr.py b/cylc/flow/flow_mgr.py index 9a98f422944..9300831342a 100644 --- a/cylc/flow/flow_mgr.py +++ b/cylc/flow/flow_mgr.py @@ -29,10 +29,10 @@ class FlowMgr: def __init__(self, db_mgr: "WorkflowDatabaseManager") -> None: """Initialise the flow manager.""" self.db_mgr = db_mgr - self.counter = 0 self.flows: Dict[int, Dict[str, str]] = {} + self.counter: int = 0 - def get_new_flow(self, description: str = "no description") -> int: + def get_new_flow(self, description: str) -> int: """Increment flow counter, record flow metadata.""" self.counter += 1 # record start time to nearest second @@ -40,7 +40,7 @@ def get_new_flow(self, description: str = "no description") -> int: now_sec: str = str( now - datetime.timedelta(microseconds=now.microsecond)) self.flows[self.counter] = { - "description": description, + "description": description or "no description", "start_time": now_sec } LOG.info( @@ -52,13 +52,16 @@ def get_new_flow(self, description: str = "no description") -> int: self.counter, self.flows[self.counter] ) - self.db_mgr.put_workflow_params_1( - WorkflowDatabaseManager.KEY_FLOW_COUNTER, - self.counter) return self.counter - def load_flows_db(self, flow_nums: Set[int]) -> None: - """Load metadata for selected flows from DB - on restart.""" + def load_from_db(self, flow_nums: Set[int]) -> None: + """Load flow data for scheduler restart. + + Sets the flow counter to the max flow number in the DB. + Loads metadata for selected flows (those in the task pool at startup). + + """ + self.counter = self.db_mgr.pri_dao.select_workflow_flows_max_flow_num() self.flows = self.db_mgr.pri_dao.select_workflow_flows(flow_nums) self._log() diff --git a/cylc/flow/rundb.py b/cylc/flow/rundb.py index 074be929cb3..38455dffe7f 100644 --- a/cylc/flow/rundb.py +++ b/cylc/flow/rundb.py @@ -527,6 +527,16 @@ def select_workflow_flows(self, flow_nums): } return flows + def select_workflow_flows_max_flow_num(self): + """Return max flow number in the workflow_flows table.""" + stmt = rf''' + SELECT + MAX(flow_num) + FROM + {self.TABLE_WORKFLOW_FLOWS} + ''' # nosec (table name is code constant) + return self.connect().execute(stmt).fetchone()[0] + def select_workflow_params_restart_count(self): """Return number of restarts in workflow_params table.""" stmt = rf""" diff --git a/cylc/flow/scheduler.py b/cylc/flow/scheduler.py index 32940bd60e6..42a8b61465e 100644 --- a/cylc/flow/scheduler.py +++ b/cylc/flow/scheduler.py @@ -664,7 +664,7 @@ def _load_pool_from_tasks(self): self.pool.force_trigger_tasks( self.options.starttask, reflow=True, - flow_descr=f"original, from {self.options.starttask}" + flow_descr=f"original flow from {self.options.starttask}" ) def _load_pool_from_point(self): @@ -683,7 +683,7 @@ def _load_pool_from_point(self): LOG.info(f"{start_type} start from {self.config.start_point}") flow_num = self.flow_mgr.get_new_flow( - f"original from {self.config.start_point}" + f"original flow from {self.config.start_point}" ) for name in self.config.get_task_name_list(): if self.config.start_point is None: @@ -1150,7 +1150,6 @@ def _load_workflow_params(self, row_idx, row): * Workflow UUID. 
* A flag to indicate if the workflow should be paused or not. * Original workflow run time zone. - * flow counter """ if row_idx == 0: LOG.info('LOADING workflow parameters') @@ -1214,9 +1213,6 @@ def _load_workflow_params(self, row_idx, row): elif key == self.workflow_db_mgr.KEY_CYCLE_POINT_TIME_ZONE: self.options.cycle_point_tz = value LOG.info(f"+ cycle point time zone = {value}") - elif key == self.workflow_db_mgr.KEY_FLOW_COUNTER: - self.flow_mgr.counter = int(value) - LOG.info(f"+ flow counter = {value}") def _load_template_vars(self, _, row): """Load workflow start up template variables.""" diff --git a/cylc/flow/task_pool.py b/cylc/flow/task_pool.py index 5d1c10795b9..54584f13318 100644 --- a/cylc/flow/task_pool.py +++ b/cylc/flow/task_pool.py @@ -360,7 +360,7 @@ def update_flow_mgr(self): flow_nums_seen = set() for itask in self.get_all_tasks(): flow_nums_seen.update(itask.flow_nums) - self.flow_mgr.load_flows_db(flow_nums_seen) + self.flow_mgr.load_from_db(flow_nums_seen) def load_abs_outputs_for_restart(self, row_idx, row): cycle, name, output = row diff --git a/cylc/flow/workflow_db_mgr.py b/cylc/flow/workflow_db_mgr.py index f1a3b9134fe..38e92b7dd65 100644 --- a/cylc/flow/workflow_db_mgr.py +++ b/cylc/flow/workflow_db_mgr.py @@ -73,7 +73,6 @@ class WorkflowDatabaseManager: KEY_CYCLE_POINT_FORMAT = 'cycle_point_format' KEY_CYCLE_POINT_TIME_ZONE = 'cycle_point_tz' KEY_RESTART_COUNT = 'n_restart' - KEY_FLOW_COUNTER = "flow_counter" TABLE_BROADCAST_EVENTS = CylcWorkflowDAO.TABLE_BROADCAST_EVENTS TABLE_BROADCAST_STATES = CylcWorkflowDAO.TABLE_BROADCAST_STATES diff --git a/tests/flakyfunctional/database/00-simple.t b/tests/flakyfunctional/database/00-simple.t index 0854c37bf98..142308722fb 100644 --- a/tests/flakyfunctional/database/00-simple.t +++ b/tests/flakyfunctional/database/00-simple.t @@ -45,7 +45,6 @@ sed -i "s/$(cylc --version)//g" "${NAME}" cmp_ok "${NAME}" << __EOF__ UTC_mode|0 cylc_version| -flow_counter|1 __EOF__ NAME='select-task-events.out' diff --git a/tests/functional/job-submission/01-job-nn-localhost/db.sqlite3 b/tests/functional/job-submission/01-job-nn-localhost/db.sqlite3 index 11c28b9baf4..86c06cd6475 100644 --- a/tests/functional/job-submission/01-job-nn-localhost/db.sqlite3 +++ b/tests/functional/job-submission/01-job-nn-localhost/db.sqlite3 @@ -7,7 +7,6 @@ INSERT INTO inheritance VALUES('root','["root"]'); INSERT INTO inheritance VALUES('foo','["foo", "root"]'); CREATE TABLE workflow_params(key TEXT, value TEXT, PRIMARY KEY(key)); INSERT INTO workflow_params VALUES('cylc_version', '8.0b2.dev'); -INSERT INTO workflow_params VALUES('flow_counter', '1'); CREATE TABLE workflow_template_vars(key TEXT, value TEXT, PRIMARY KEY(key)); CREATE TABLE task_action_timers(cycle TEXT, name TEXT, ctx_key TEXT, ctx TEXT, delays TEXT, num INTEGER, delay TEXT, timeout TEXT, PRIMARY KEY(cycle, name, ctx_key)); INSERT INTO task_action_timers VALUES('1','foo','"poll_timer"','["tuple", [[99, "running"]]]','[]',0,NULL,NULL); diff --git a/tests/functional/restart/50-two-flows.t b/tests/functional/restart/50-two-flows.t index 4eb4bbd62ce..8a1bf7efe07 100644 --- a/tests/functional/restart/50-two-flows.t +++ b/tests/functional/restart/50-two-flows.t @@ -28,7 +28,7 @@ reftest_run mv "${WORKFLOW_RUN_DIR}/reference.restart.log" "${WORKFLOW_RUN_DIR}/reference.log" reftest_run -grep_workflow_log_ok flow-1 "flow: 1 (original from 1)" +grep_workflow_log_ok flow-1 "flow: 1 (original flow from 1)" grep_workflow_log_ok flow-2 "flow: 2 (cheese wizard)" purge From 
07004d780b2e708e88a42b4531250d7fb89ae81f Mon Sep 17 00:00:00 2001 From: Hilary James Oliver Date: Fri, 15 Oct 2021 15:54:45 +1300 Subject: [PATCH 12/14] Post rebase fix. --- cylc/flow/task_pool.py | 46 +++++++++++++++++++++++------------------- 1 file changed, 25 insertions(+), 21 deletions(-) diff --git a/cylc/flow/task_pool.py b/cylc/flow/task_pool.py index 54584f13318..52ddbddc78c 100644 --- a/cylc/flow/task_pool.py +++ b/cylc/flow/task_pool.py @@ -521,7 +521,7 @@ def load_db_task_action_timers(self, row_idx, row): {"id": id_, "ctx_key": ctx_key_raw}) return - def load_db_tasks_to_hold(self) -> None: + def load_db_tasks_to_hold(self): """Update the tasks_to_hold set with the tasks stored in the database.""" self.tasks_to_hold.update( @@ -529,7 +529,7 @@ def load_db_tasks_to_hold(self) -> None: self.workflow_db_mgr.pri_dao.select_tasks_to_hold() ) - def spawn_successor(self, itask): + def spawn_successor(self, itask: TaskProxy) -> Optional[TaskProxy]: """Spawn next-cycle instance of itask if parentless. This includes: @@ -538,22 +538,26 @@ def spawn_successor(self, itask): - absolute-triggered tasks (after the first instance is spawned) """ next_point = itask.next_point() - if next_point is not None: - parent_points = itask.tdef.get_parent_points(next_point) - if ( - not parent_points - or all(x < self.config.start_point for x in parent_points) - or itask.tdef.get_abs_triggers(next_point) - ): - taskid = TaskID.get(itask.tdef.name, next_point) - next_task = ( - self._get_hidden_task_by_id(taskid) - or self._get_main_task_by_id(taskid) - or self.spawn_task( - itask.tdef.name, next_point, itask.flow_nums) - ) - if next_task: - self.add_to_pool(next_task) + if next_point is None: + return None + + parent_points = itask.tdef.get_parent_points(next_point) + if ( + not parent_points + or all(x < self.config.start_point for x in parent_points) + or itask.tdef.has_only_abs_triggers(next_point) + ): + taskid = TaskID.get(itask.tdef.name, next_point) + next_task = ( + self._get_hidden_task_by_id(taskid) + or self._get_main_task_by_id(taskid) + or self.spawn_task( + itask.tdef.name, next_point, itask.flow_nums) + ) + if next_task: + self.add_to_pool(next_task) + return next_task + return None def release_runahead_task( self, @@ -577,9 +581,6 @@ def release_runahead_task( if itask.tdef.max_future_prereq_offset is not None: self.set_max_future_offset() - if not runahead_limit_point: - return - if itask.tdef.sequential: # implicit prev-instance parent return @@ -588,6 +589,9 @@ def release_runahead_task( # No reflow return + if not runahead_limit_point: + return + # Autospawn successor of itask if parentless. n_task = self.spawn_successor(itask) if n_task and n_task.point <= runahead_limit_point: From 0335775524c4ec8e22495bcf81e32a6219c6dc2a Mon Sep 17 00:00:00 2001 From: Hilary James Oliver Date: Thu, 21 Oct 2021 09:40:46 +1300 Subject: [PATCH 13/14] Update cylc/flow/task_pool.py Co-authored-by: Melanie Hall <37735232+datamel@users.noreply.github.com> --- cylc/flow/task_pool.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cylc/flow/task_pool.py b/cylc/flow/task_pool.py index 52ddbddc78c..8697c74d5d8 100644 --- a/cylc/flow/task_pool.py +++ b/cylc/flow/task_pool.py @@ -800,7 +800,7 @@ def reload_taskdefs(self) -> None: # Keep active orphaned task, but stop it from spawning. 
itask.graph_children = {} LOG.warning( - "f[{itask}] will not spawn children " + f"[{itask}] will not spawn children " "- task definition removed" ) else: From c58eef451483ce02070ac92266c9398d2397ea34 Mon Sep 17 00:00:00 2001 From: Hilary James Oliver Date: Thu, 21 Oct 2021 10:00:02 +1300 Subject: [PATCH 14/14] Address review feedback (tweak some docstrings). --- cylc/flow/network/resolvers.py | 3 +++ cylc/flow/scheduler.py | 6 +++++- cylc/flow/task_pool.py | 6 +++++- 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/cylc/flow/network/resolvers.py b/cylc/flow/network/resolvers.py index ab237f30194..a40a3f48ec8 100644 --- a/cylc/flow/network/resolvers.py +++ b/cylc/flow/network/resolvers.py @@ -715,9 +715,12 @@ def set_graph_window_extent(self, n_edge_distance): def force_spawn_children(self, tasks, outputs, flow_num): """Spawn children of given task outputs. + User-facing method name: set_outputs. + Args: tasks (list): List of identifiers, see `task globs` outputs (list): List of outputs to spawn on + flow_num (int): Flow number to attribute the outputs to. Returns: tuple: (outcome, message) diff --git a/cylc/flow/scheduler.py b/cylc/flow/scheduler.py index 42a8b61465e..bf833ad69bf 100644 --- a/cylc/flow/scheduler.py +++ b/cylc/flow/scheduler.py @@ -1845,7 +1845,11 @@ def command_force_trigger_tasks(self, items, reflow, flow_descr): return self.pool.force_trigger_tasks(items, reflow, flow_descr) def command_force_spawn_children(self, items, outputs, flow_num): - """Force spawn task successors.""" + """Force spawn downstream children of given task outputs. + + User-facing method name: set_outputs. + + """ return self.pool.force_spawn_children(items, outputs, flow_num) def _update_profile_info(self, category, amount, amount_format="%s"): diff --git a/cylc/flow/task_pool.py b/cylc/flow/task_pool.py index 8697c74d5d8..f713d4fbf83 100644 --- a/cylc/flow/task_pool.py +++ b/cylc/flow/task_pool.py @@ -1388,7 +1388,11 @@ def match_taskdefs( return n_warnings, task_items def force_spawn_children(self, items, outputs, flow_num): - """Spawn downstream children of given task outputs on user command.""" + """Spawn downstream children of given task outputs on user command. + + User-facing method name: set_outputs. + + """ n_warnings, task_items = self.match_taskdefs(items) for (_, point), taskdef in sorted(task_items.items()): # This is the upstream target task:
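Taken together, a toy sketch of the spawn-on-outputs behaviour these docstrings describe (graph, names, and return shape invented for illustration; the real method operates on matched task proxies in the task pool):

```python
# Completing a task output spawns or updates the children that depend
# on it, attributed to the given flow number. "succeeded" is the
# default output, matching cylc set-outputs.
graph_children = {
    ("foo", "succeeded"): ["bar"],
    ("foo", "failed"): ["recover"],
}

def force_spawn_children(task, outputs=None, flow_num=1):
    spawned = []
    for output in outputs or ["succeeded"]:
        for child in graph_children.get((task, output), []):
            spawned.append((child, flow_num))
    return spawned

# Default output: spawns bar just as if foo:succeeded completed normally.
assert force_spawn_children("foo") == [("bar", 1)]
assert force_spawn_children("foo", ["failed"], flow_num=2) == [("recover", 2)]
```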