From cb3a0477d5ba67696791a5fe852057ffded32ade Mon Sep 17 00:00:00 2001 From: Stephen Sun <5379172+stephenxs@users.noreply.github.com> Date: Wed, 20 Apr 2022 12:40:53 +0800 Subject: [PATCH] Support option --ports of config qos reload for reloading ports' QoS and buffer configuration to default (#2125) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit What I did CLI: config qos reload --ports --ports : a set of interfaces with empty QoS and buffer configurations (typically they have just been created via DPB). Format: {,port}, like “Ethernet0” or “Ethernet4,Ethernet5,Ethernet6,Ethernet7” Each port in the list should exist in the CONFIG_DB.PORT table The flow: Render the template qos_config.j2 and buffer_config.j2, generating a temporary json file. (This is one step in “config qos reload”). Parse the json file, extracting all the items on the ports in the port_list Apply all the extracted items into the CONFIG_DB Signed-off-by: Stephen Sun --- config/main.py | 167 ++++++++++++++++++++++- doc/Command-Reference.md | 30 ++++ tests/config_test.py | 52 +++++++ tests/qos_config_input/0/update_qos.json | 57 ++++++++ tests/qos_config_input/1/update_qos.json | 57 ++++++++ tests/qos_config_input/update_qos.json | 57 ++++++++ 6 files changed, 419 insertions(+), 1 deletion(-) create mode 100644 tests/qos_config_input/0/update_qos.json create mode 100644 tests/qos_config_input/1/update_qos.json create mode 100644 tests/qos_config_input/update_qos.json diff --git a/config/main.py b/config/main.py index 197fb33662..3d549b991c 100644 --- a/config/main.py +++ b/config/main.py @@ -2258,6 +2258,7 @@ def _update_buffer_calculation_model(config_db, model): @qos.command('reload') @click.pass_context +@click.option('--ports', is_flag=False, required=False, help="List of ports that needs to be updated") @click.option('--no-dynamic-buffer', is_flag=True, help="Disable dynamic buffer calculation") @click.option( '--json-data', type=click.STRING, @@ 
-2267,8 +2268,13 @@ def _update_buffer_calculation_model(config_db, model): '--dry_run', type=click.STRING, help="Dry run, writes config to the given file" ) -def reload(ctx, no_dynamic_buffer, dry_run, json_data): +def reload(ctx, no_dynamic_buffer, dry_run, json_data, ports): """Reload QoS configuration""" + if ports: + log.log_info("'qos reload --ports {}' executing...".format(ports)) + _qos_update_ports(ctx, ports, dry_run, json_data) + return + log.log_info("'qos reload' executing...") _clear_qos() @@ -2340,6 +2346,165 @@ def reload(ctx, no_dynamic_buffer, dry_run, json_data): if buffer_model_updated: print("Buffer calculation model updated, restarting swss is required to take effect") +def _qos_update_ports(ctx, ports, dry_run, json_data): + """Reload QoS configuration""" + _, hwsku_path = device_info.get_paths_to_platform_and_hwsku_dirs() + sonic_version_file = device_info.get_sonic_version_file() + + portlist = ports.split(',') + portset_to_handle = set(portlist) + portset_handled = set() + + namespace_list = [DEFAULT_NAMESPACE] + if multi_asic.get_num_asics() > 1: + namespace_list = multi_asic.get_namespaces_from_linux() + + # Tables whose key is port only + tables_single_index = [ + 'PORT_QOS_MAP', + 'BUFFER_PORT_INGRESS_PROFILE_LIST', + 'BUFFER_PORT_EGRESS_PROFILE_LIST'] + # Tables whose key is port followed by other element + tables_multi_index = [ + 'QUEUE', + 'BUFFER_PG', + 'BUFFER_QUEUE'] + + if json_data: + from_db = "--additional-data \'{}\'".format(json_data) if json_data else "" + else: + from_db = "-d" + + items_to_update = {} + config_dbs = {} + + for ns in namespace_list: + if ns is DEFAULT_NAMESPACE: + asic_id_suffix = "" + config_db = ConfigDBConnector() + else: + asic_id = multi_asic.get_asic_id_from_name(ns) + if asic_id is None: + click.secho("Command 'qos update' failed with invalid namespace '{}'".format(ns), fg="yellow") + raise click.Abort() + asic_id_suffix = str(asic_id) + + config_db = ConfigDBConnector(use_unix_socket_path=True, 
namespace=ns) + + config_db.connect() + config_dbs[ns] = config_db + if is_dynamic_buffer_enabled(config_db): + buffer_template_file = os.path.join(hwsku_path, asic_id_suffix, "buffers_dynamic.json.j2") + else: + buffer_template_file = os.path.join(hwsku_path, asic_id_suffix, "buffers.json.j2") + + if not os.path.isfile(buffer_template_file): + click.secho("Buffer definition template not found at {}".format(buffer_template_file), fg="yellow") + ctx.abort() + + qos_template_file = os.path.join(hwsku_path, asic_id_suffix, "qos.json.j2") + + if not os.path.isfile(qos_template_file): + click.secho("QoS definition template not found at {}".format(qos_template_file), fg="yellow") + ctx.abort() + + # Remove multi indexed entries first + for table_name in tables_multi_index: + entries = config_db.get_keys(table_name) + for key in entries: + port, _ = key + if not port in portset_to_handle: + continue + config_db.set_entry(table_name, '|'.join(key), None) + + cmd_ns = "" if ns is DEFAULT_NAMESPACE else "-n {}".format(ns) + command = "{} {} {} -t {},config-db -t {},config-db -y {} --print-data".format( + SONIC_CFGGEN_PATH, cmd_ns, from_db, buffer_template_file, qos_template_file, sonic_version_file + ) + jsonstr = clicommon.run_command(command, display_cmd=False, return_cmd=True) + + jsondict = json.loads(jsonstr) + port_table = jsondict.get('PORT') + if port_table: + ports_to_update = set(port_table.keys()).intersection(portset_to_handle) + if not ports_to_update: + continue + else: + continue + + portset_handled.update(ports_to_update) + + items_to_apply = {} + + for table_name in tables_single_index: + table_items_rendered = jsondict.get(table_name) + if table_items_rendered: + for key, data in table_items_rendered.items(): + port = key + if not port in ports_to_update: + continue + # Push the rendered data to config-db + if not items_to_apply.get(table_name): + items_to_apply[table_name] = {} + items_to_apply[table_name][key] = data + + for table_name in 
tables_multi_index: + table_items_rendered = jsondict.get(table_name) + if table_items_rendered: + for key, data in table_items_rendered.items(): + port = key.split('|')[0] + if not port in ports_to_update: + continue + # Push the result to config-db + if not items_to_apply.get(table_name): + items_to_apply[table_name] = {} + items_to_apply[table_name][key] = data + + # Handle CABLE_LENGTH + # This table needs to be specially handled because the port is not the index but the field name + # The idea is for all the entries in template, the same entries in CONFIG_DB will be merged together + # Eg. there is entry AZURE rendered from template for ports Ethernet0, Ethernet4 with cable length "5m": + # and entry AZURE in CONFIG_DB for ports Ethernet8, Ethernet12, Ethernet16 with cable length "40m" + # The entry that will eventually be pushed into CONFIG_DB is + # {"AZURE": {"Ethernet0": "5m", "Ethernet4": "5m", "Ethernet8": "40m", "Ethernet12": "40m", "Ethernet16": "40m"}} + table_name = 'CABLE_LENGTH' + cable_length_table = jsondict.get(table_name) + if cable_length_table: + for key, item in cable_length_table.items(): + cable_length_from_db = config_db.get_entry(table_name, key) + cable_length_from_template = {} + for port in ports_to_update: + cable_len = item.get(port) + if cable_len: + cable_length_from_template[port] = cable_len + # Reaching this point, + # - cable_length_from_template contains cable length rendered from the template, eg Ethernet0 and Ethernet4 in the above example + # - cable_length_from_db contains cable length existing in the CONFIG_DB, eg Ethernet8, Ethernet12, and Ethernet16 in the above example + + if not items_to_apply.get(table_name): + items_to_apply[table_name] = {} + + if cable_length_from_db: + cable_length_from_db.update(cable_length_from_template) + items_to_apply[table_name][key] = cable_length_from_db + else: + items_to_apply[table_name][key] = cable_length_from_template + + if items_to_apply: + items_to_update[ns] = items_to_apply + 
+ if dry_run: + with open(dry_run + ns, "w+") as f: + json.dump(items_to_apply, f, sort_keys=True, indent=4) + else: + jsonstr = json.dumps(items_to_apply) + cmd_ns = "" if ns is DEFAULT_NAMESPACE else "-n {}".format(ns) + command = "{} {} --additional-data '{}' --write-to-db".format(SONIC_CFGGEN_PATH, cmd_ns, jsonstr) + clicommon.run_command(command, display_cmd=False) + + if portset_to_handle != portset_handled: + click.echo("The port(s) {} are not updated because they do not exist".format(portset_to_handle - portset_handled)) + def is_dynamic_buffer_enabled(config_db): """Return whether the current system supports dynamic buffer calculation""" device_metadata = config_db.get_entry('DEVICE_METADATA', 'localhost') diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 2843bd0ca6..dc750d2bbc 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -7586,6 +7586,36 @@ Some of the example QOS configurations that users can modify are given below. When there are no changes in the platform specific configutation files, they internally use the file "/usr/share/sonic/templates/buffers_config.j2" and "/usr/share/sonic/templates/qos_config.j2" to generate the configuration. ``` +**config qos reload --ports port_list** + +This command is used to reload the default QoS configuration on a group of ports. +Typically, the default QoS configuration is in the following tables. +1) PORT_QOS_MAP +2) QUEUE +3) BUFFER_PG +4) BUFFER_QUEUE +5) BUFFER_PORT_INGRESS_PROFILE_LIST +6) BUFFER_PORT_EGRESS_PROFILE_LIST +7) CABLE_LENGTH + +If there was QoS configuration in the above tables for the ports: + + - if `--force` option is provided, the existing QoS configuration will be replaced by the default QoS configuration, + - otherwise, the command will exit with nothing updated. 
+ +- Usage: + ``` + config qos reload --ports [,port] + ``` + +- Example: + ``` + admin@sonic:~$ sudo config qos reload --ports Ethernet0,Ethernet4 + + In this example, it updates the QoS configuration on port Ethernet0 and Ethernet4 to default. + If there was QoS configuration on the ports, the command will clear the existing QoS configuration on the port and reload to default. + ``` + Go Back To [Beginning of the document](#) or [Beginning of this section](#qos) ## sFlow diff --git a/tests/config_test.py b/tests/config_test.py index 72110f805e..88f6015ee5 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -442,6 +442,24 @@ def test_qos_reload_single( ) assert filecmp.cmp(output_file, expected_result, shallow=False) + def test_qos_update_single( + self, get_cmd_module, setup_qos_mock_apis + ): + (config, show) = get_cmd_module + json_data = '{"DEVICE_METADATA": {"localhost": {}}, "PORT": {"Ethernet0": {}}}' + runner = CliRunner() + output_file = os.path.join(os.sep, "tmp", "qos_config_update.json") + cmd_vector = ["reload", "--ports", "Ethernet0", "--json-data", json_data, "--dry_run", output_file] + result = runner.invoke(config.config.commands["qos"], cmd_vector) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + cwd = os.path.dirname(os.path.realpath(__file__)) + expected_result = os.path.join( + cwd, "qos_config_input", "update_qos.json" + ) + assert filecmp.cmp(output_file, expected_result, shallow=False) + @classmethod def teardown_class(cls): print("TEARDOWN") @@ -495,6 +513,40 @@ def test_qos_reload_masic( file = "{}{}".format(output_file, asic) assert filecmp.cmp(file, expected_result, shallow=False) + def test_qos_update_masic( + self, get_cmd_module, setup_qos_mock_apis, + setup_multi_broadcom_masic + ): + (config, show) = get_cmd_module + runner = CliRunner() + + output_file = os.path.join(os.sep, "tmp", "qos_update_output") + print("Saving output in {}<0,1,2..>".format(output_file)) + num_asic = 
device_info.get_num_npus() + for asic in range(num_asic): + try: + file = "{}{}".format(output_file, asic) + os.remove(file) + except OSError: + pass + json_data = '{"DEVICE_METADATA": {"localhost": {}}, "PORT": {"Ethernet0": {}}}' + result = runner.invoke( + config.config.commands["qos"], + ["reload", "--ports", "Ethernet0,Ethernet4", "--json-data", json_data, "--dry_run", output_file] + ) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + cwd = os.path.dirname(os.path.realpath(__file__)) + + for asic in range(num_asic): + expected_result = os.path.join( + cwd, "qos_config_input", str(asic), "update_qos.json" + ) + + assert filecmp.cmp(output_file + "asic{}".format(asic), expected_result, shallow=False) + @classmethod def teardown_class(cls): print("TEARDOWN") diff --git a/tests/qos_config_input/0/update_qos.json b/tests/qos_config_input/0/update_qos.json new file mode 100644 index 0000000000..09749a5123 --- /dev/null +++ b/tests/qos_config_input/0/update_qos.json @@ -0,0 +1,57 @@ +{ + "BUFFER_PG": { + "Ethernet0|0": { + "profile": "ingress_lossy_profile" + } + }, + "BUFFER_QUEUE": { + "Ethernet0|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet0|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet0|5-6": { + "profile": "egress_lossy_profile" + } + }, + "CABLE_LENGTH": { + "AZURE": { + "Ethernet0": "300m" + } + }, + "PORT_QOS_MAP": { + "Ethernet0": { + "dscp_to_tc_map": "AZURE", + "pfc_enable": "3,4", + "pfc_to_queue_map": "AZURE", + "tc_to_pg_map": "AZURE", + "tc_to_queue_map": "AZURE" + } + }, + "QUEUE": { + "Ethernet0|0": { + "scheduler": "scheduler.0" + }, + "Ethernet0|1": { + "scheduler": "scheduler.0" + }, + "Ethernet0|2": { + "scheduler": "scheduler.0" + }, + "Ethernet0|3": { + "scheduler": "scheduler.1", + "wred_profile": "AZURE_LOSSLESS" + }, + "Ethernet0|4": { + "scheduler": "scheduler.1", + "wred_profile": "AZURE_LOSSLESS" + }, + "Ethernet0|5": { + "scheduler": "scheduler.0" + }, + "Ethernet0|6": { 
+ "scheduler": "scheduler.0" + } + } +} \ No newline at end of file diff --git a/tests/qos_config_input/1/update_qos.json b/tests/qos_config_input/1/update_qos.json new file mode 100644 index 0000000000..09749a5123 --- /dev/null +++ b/tests/qos_config_input/1/update_qos.json @@ -0,0 +1,57 @@ +{ + "BUFFER_PG": { + "Ethernet0|0": { + "profile": "ingress_lossy_profile" + } + }, + "BUFFER_QUEUE": { + "Ethernet0|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet0|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet0|5-6": { + "profile": "egress_lossy_profile" + } + }, + "CABLE_LENGTH": { + "AZURE": { + "Ethernet0": "300m" + } + }, + "PORT_QOS_MAP": { + "Ethernet0": { + "dscp_to_tc_map": "AZURE", + "pfc_enable": "3,4", + "pfc_to_queue_map": "AZURE", + "tc_to_pg_map": "AZURE", + "tc_to_queue_map": "AZURE" + } + }, + "QUEUE": { + "Ethernet0|0": { + "scheduler": "scheduler.0" + }, + "Ethernet0|1": { + "scheduler": "scheduler.0" + }, + "Ethernet0|2": { + "scheduler": "scheduler.0" + }, + "Ethernet0|3": { + "scheduler": "scheduler.1", + "wred_profile": "AZURE_LOSSLESS" + }, + "Ethernet0|4": { + "scheduler": "scheduler.1", + "wred_profile": "AZURE_LOSSLESS" + }, + "Ethernet0|5": { + "scheduler": "scheduler.0" + }, + "Ethernet0|6": { + "scheduler": "scheduler.0" + } + } +} \ No newline at end of file diff --git a/tests/qos_config_input/update_qos.json b/tests/qos_config_input/update_qos.json new file mode 100644 index 0000000000..09749a5123 --- /dev/null +++ b/tests/qos_config_input/update_qos.json @@ -0,0 +1,57 @@ +{ + "BUFFER_PG": { + "Ethernet0|0": { + "profile": "ingress_lossy_profile" + } + }, + "BUFFER_QUEUE": { + "Ethernet0|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet0|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet0|5-6": { + "profile": "egress_lossy_profile" + } + }, + "CABLE_LENGTH": { + "AZURE": { + "Ethernet0": "300m" + } + }, + "PORT_QOS_MAP": { + "Ethernet0": { + "dscp_to_tc_map": "AZURE", + "pfc_enable": "3,4", 
+ "pfc_to_queue_map": "AZURE", + "tc_to_pg_map": "AZURE", + "tc_to_queue_map": "AZURE" + } + }, + "QUEUE": { + "Ethernet0|0": { + "scheduler": "scheduler.0" + }, + "Ethernet0|1": { + "scheduler": "scheduler.0" + }, + "Ethernet0|2": { + "scheduler": "scheduler.0" + }, + "Ethernet0|3": { + "scheduler": "scheduler.1", + "wred_profile": "AZURE_LOSSLESS" + }, + "Ethernet0|4": { + "scheduler": "scheduler.1", + "wred_profile": "AZURE_LOSSLESS" + }, + "Ethernet0|5": { + "scheduler": "scheduler.0" + }, + "Ethernet0|6": { + "scheduler": "scheduler.0" + } + } +} \ No newline at end of file