From 8389c8137d4fdc5a2d1c803c9c06e6dcb4f849c1 Mon Sep 17 00:00:00 2001 From: Stepan Blyshchak <38952541+stepanblyschak@users.noreply.github.com> Date: Sun, 27 Mar 2022 10:08:00 +0300 Subject: [PATCH 01/11] [sonic-cli-gen] fix failure "Error: digits_class" when field "digit_class" does not exist in DB (#2054) here Signed-off-by: Stepan Blyschak --- .../templates/sonic-cli-gen/config.py.j2 | 2 +- .../autogen_test/show_cmd_output.py | 12 ++++++------ .../autogen_test/sonic-device_metadata.yang | 4 ++++ tests/cli_autogen_test.py | 12 ++++++++++++ 4 files changed, 23 insertions(+), 7 deletions(-) diff --git a/sonic-utilities-data/templates/sonic-cli-gen/config.py.j2 b/sonic-utilities-data/templates/sonic-cli-gen/config.py.j2 index 7706ae3940..0906d7c99f 100644 --- a/sonic-utilities-data/templates/sonic-cli-gen/config.py.j2 +++ b/sonic-utilities-data/templates/sonic-cli-gen/config.py.j2 @@ -102,7 +102,7 @@ def update_entry_validated(db, table, key, data, create_if_not_exists=False): entry_changed = False for attr, value in data.items(): - if value == cfg[table][key][attr]: + if value == cfg[table][key].get(attr): continue entry_changed = True if value is None: diff --git a/tests/cli_autogen_input/autogen_test/show_cmd_output.py b/tests/cli_autogen_input/autogen_test/show_cmd_output.py index 19c02c7783..a0a0e8cf7e 100644 --- a/tests/cli_autogen_input/autogen_test/show_cmd_output.py +++ b/tests/cli_autogen_input/autogen_test/show_cmd_output.py @@ -4,16 +4,16 @@ show_device_metadata_localhost="""\ -HWSKU DEFAULT BGP STATUS DOCKER ROUTING CONFIG MODE HOSTNAME PLATFORM MAC DEFAULT PFCWD STATUS BGP ASN DEPLOYMENT ID TYPE BUFFER MODEL FRR MGMT FRAMEWORK CONFIG ------------ -------------------- ---------------------------- ---------- ---------------------- ----------------- ---------------------- --------- --------------- --------- -------------- --------------------------- -ACS-MSN2100 up N/A r-sonic-01 x86_64-mlnx_msn2100-r0 ff:ff:ff:ff:ff:00 disable N/A N/A ToRRouter traditional N/A +HWSKU DEFAULT BGP STATUS DOCKER ROUTING CONFIG MODE HOSTNAME PLATFORM MAC DEFAULT PFCWD STATUS BGP ASN DEPLOYMENT ID NON EXISTING FIELD TYPE BUFFER MODEL FRR MGMT FRAMEWORK CONFIG +----------- -------------------- ---------------------------- ---------- ---------------------- ----------------- ---------------------- --------- --------------- -------------------- --------- -------------- --------------------------- +ACS-MSN2100 up N/A r-sonic-01 x86_64-mlnx_msn2100-r0 ff:ff:ff:ff:ff:00 disable N/A N/A N/A ToRRouter traditional N/A """ show_device_metadata_localhost_changed_buffer_model="""\ -HWSKU DEFAULT BGP STATUS DOCKER ROUTING CONFIG MODE HOSTNAME PLATFORM MAC DEFAULT PFCWD STATUS BGP ASN DEPLOYMENT ID TYPE BUFFER MODEL FRR MGMT FRAMEWORK CONFIG ------------ -------------------- ---------------------------- ---------- ---------------------- ----------------- ---------------------- --------- --------------- --------- -------------- --------------------------- -ACS-MSN2100 up N/A r-sonic-01 x86_64-mlnx_msn2100-r0 ff:ff:ff:ff:ff:00 disable N/A N/A ToRRouter dynamic N/A +HWSKU DEFAULT BGP STATUS DOCKER ROUTING CONFIG MODE HOSTNAME PLATFORM MAC DEFAULT PFCWD STATUS BGP ASN DEPLOYMENT ID NON EXISTING FIELD TYPE BUFFER MODEL FRR MGMT FRAMEWORK CONFIG +----------- -------------------- ---------------------------- ---------- ---------------------- ----------------- ---------------------- --------- --------------- -------------------- --------- -------------- --------------------------- +ACS-MSN2100 up N/A r-sonic-01 
x86_64-mlnx_msn2100-r0 ff:ff:ff:ff:ff:00 disable N/A N/A N/A ToRRouter dynamic N/A """ diff --git a/tests/cli_autogen_input/autogen_test/sonic-device_metadata.yang b/tests/cli_autogen_input/autogen_test/sonic-device_metadata.yang index 400cbf3bcd..593f17bac3 100644 --- a/tests/cli_autogen_input/autogen_test/sonic-device_metadata.yang +++ b/tests/cli_autogen_input/autogen_test/sonic-device_metadata.yang @@ -88,6 +88,10 @@ module sonic-device_metadata { type uint32; } + leaf non_existing_field { + type uint32; + } + leaf type { type string { length 1..255; diff --git a/tests/cli_autogen_test.py b/tests/cli_autogen_test.py index 13407d1c13..6c4bd334ad 100644 --- a/tests/cli_autogen_test.py +++ b/tests/cli_autogen_test.py @@ -109,6 +109,18 @@ def test_config_device_metadata(self): assert result.exit_code == SUCCESS assert result.output == show_cmd_output.show_device_metadata_localhost_changed_buffer_model + def test_config_device_metadata_non_existing_field(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = mock_db_path + db = Db() + runner = CliRunner() + + result = runner.invoke( + config_main.config.commands['device-metadata'].commands['localhost'].commands['non-existing-field'], ['12'], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS @pytest.mark.parametrize("parameter,value", [ ('default-bgp-status', INVALID_VALUE), From 827358f22c9e072edab2b358801ce2953400aa6b Mon Sep 17 00:00:00 2001 From: Vivek R Date: Sun, 27 Mar 2022 12:12:27 -0700 Subject: [PATCH 02/11] [debug dump] dump interface module added (#2070) Added the Interface Module to the Debug Dump Utility Added the Corresponding UT's Refactored Common methods required into match_helper.py --- dump/helper.py | 5 +- dump/match_helper.py | 85 ++++++ dump/plugins/interface.py | 266 ++++++++++++++++++ dump/plugins/port.py | 15 +- dump/plugins/portchannel.py | 62 +--- dump/plugins/vlan.py | 12 +- tests/dump_input/interface/appl_db.json | 58 ++++ tests/dump_input/interface/asic_db.json | 80 ++++++ tests/dump_input/interface/config_db.json | 49 ++++ tests/dump_input/interface/state_db.json | 38 +++ .../dump_tests/module_tests/interface_test.py | 207 ++++++++++++++ 11 files changed, 800 insertions(+), 77 deletions(-) create mode 100644 dump/match_helper.py create mode 100644 dump/plugins/interface.py create mode 100644 tests/dump_input/interface/appl_db.json create mode 100644 tests/dump_input/interface/asic_db.json create mode 100644 tests/dump_input/interface/config_db.json create mode 100644 tests/dump_input/interface/state_db.json create mode 100644 tests/dump_tests/module_tests/interface_test.py diff --git a/dump/helper.py b/dump/helper.py index 7da893a464..3a882c0771 100644 --- a/dump/helper.py +++ b/dump/helper.py @@ -1,13 +1,16 @@ import os, sys, json + def create_template_dict(dbs): """ Generate a Template which will be returned by Executor Classes """ return {db: {'keys': [], 'tables_not_found': []} for db in dbs} + def verbose_print(str): if "VERBOSE" in os.environ and os.environ["VERBOSE"] == "1": print(str) + def handle_error(err_str, excep=False): """ Handles general error conditions, if any experienced by the module, @@ -17,7 +20,7 @@ def handle_error(err_str, excep=False): raise Exception("ERROR : {}".format(err_str)) else: print("ERROR : {}".format(err_str), file = sys.stderr) - + def handle_multiple_keys_matched_error(err_str, key_to_go_with="", excep=False): if excep: diff --git a/dump/match_helper.py b/dump/match_helper.py new file mode 100644 index 
0000000000..9493a83458 --- /dev/null +++ b/dump/match_helper.py @@ -0,0 +1,85 @@ +from dump.match_infra import MatchRequest +from dump.helper import handle_multiple_keys_matched_error + +# Port Helper Methods + +def fetch_port_oid(match_engine, port_name, ns): + """ + Fetches thr relevant SAI_OBJECT_TYPE_PORT given port name + """ + req = MatchRequest(db="ASIC_DB", table="ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF", key_pattern="*", field="SAI_HOSTIF_ATTR_NAME", + value=port_name, return_fields=["SAI_HOSTIF_ATTR_OBJ_ID"], ns=ns) + ret = match_engine.fetch(req) + asic_port_obj_id = "" + if not ret["error"] and len(ret["keys"]) != 0: + sai_hostif_obj_key = ret["keys"][-1] + if sai_hostif_obj_key in ret["return_values"] and "SAI_HOSTIF_ATTR_OBJ_ID" in ret["return_values"][sai_hostif_obj_key]: + asic_port_obj_id = ret["return_values"][sai_hostif_obj_key]["SAI_HOSTIF_ATTR_OBJ_ID"] + return req, asic_port_obj_id, ret + +# Vlan Helper Methods + +def fetch_vlan_oid(match_engine, vlan_name, ns): + # Convert 'Vlanxxx' to 'xxx' + if vlan_name[0:4] != "Vlan" or not vlan_name[4:].isnumeric(): + vlan_num = -1 + else: + vlan_num = int(vlan_name[4:]) + + # Find the table named "ASIC_STATE:SAI_OBJECT_TYPE_VLAN:*" in which SAI_VLAN_ATTR_VLAN_ID = vlan_num + req = MatchRequest(db="ASIC_DB", table="ASIC_STATE:SAI_OBJECT_TYPE_VLAN", key_pattern="*", field="SAI_VLAN_ATTR_VLAN_ID", + value=str(vlan_num), ns=ns) + ret = match_engine.fetch(req) + vlan_oid = "" + if ret["keys"]: + vlan_oid = ret["keys"][0].split(":", 2)[-1] + return req, vlan_oid, ret + +# LAG Helper Methods + +def get_lag_members_from_cfg(match_engine, lag_name, ns): + """ + Get the members associated with a LAG from Config DB + """ + lag_members = [] + req = MatchRequest(db="CONFIG_DB", table="PORTCHANNEL_MEMBER", key_pattern=lag_name + "|*", ns=ns) + ret = match_engine.fetch(req) + for key in ret["keys"]: + lag_members.append(key.split("|")[-1]) + return req, lag_members, ret + +def get_lag_and_member_obj(match_engine, port_asic_obj, ns): + """ + Given the member port oid, fetch lag_member & lag oid's + """ + req = MatchRequest(db="ASIC_DB", table="ASIC_STATE:SAI_OBJECT_TYPE_LAG_MEMBER", key_pattern="*", field="SAI_LAG_MEMBER_ATTR_PORT_ID", + value=port_asic_obj, return_fields=["SAI_LAG_MEMBER_ATTR_LAG_ID"], ns=ns) + ret = match_engine.fetch(req) + lag_member_key = "" + lag_oid = "" + if not ret["error"] and ret["keys"]: + lag_member_key = ret["keys"][-1] + if lag_member_key in ret["return_values"] and "SAI_LAG_MEMBER_ATTR_LAG_ID" in ret["return_values"][lag_member_key]: + lag_oid = ret["return_values"][lag_member_key]["SAI_LAG_MEMBER_ATTR_LAG_ID"] + return lag_member_key, lag_oid + +def fetch_lag_oid(match_engine, lag_name, ns): + """ + Finding the relevant SAI_OBJECT_TYPE_LAG key directly from the ASIC is not possible given a LAG name + Thus, using the members to find SAI_LAG_MEMBER_ATTR_LAG_ID + """ + _, lag_members, _ = get_lag_members_from_cfg(match_engine, lag_name, ns) + lag_type_oids = set() + for port_name in lag_members: + _, port_asic_obj, _ = fetch_port_oid(match_engine, port_name, ns) + if port_asic_obj: + lag_member_key, lag_oid = get_lag_and_member_obj(match_engine, port_asic_obj, ns) + lag_type_oids.add(lag_oid) + lag_type_oid, lag_type_oids = "", list(lag_type_oids) + if lag_type_oids: + if len(lag_type_oids) > 1: + # Ideally, only one associated lag_oid should be present for a portchannel + handle_multiple_keys_matched_error("Multipe lag_oids matched for portchannel: {}, \ + lag_oids matched {}".format(lag_name, lag_type_oids), 
lag_type_oids[-1]) + lag_type_oid = lag_type_oids[-1] + return lag_type_oid diff --git a/dump/plugins/interface.py b/dump/plugins/interface.py new file mode 100644 index 0000000000..ffa226076e --- /dev/null +++ b/dump/plugins/interface.py @@ -0,0 +1,266 @@ +from sonic_py_common.interface import get_interface_table_name, get_intf_longname, VLAN_SUB_INTERFACE_SEPARATOR +from sonic_py_common.multi_asic import DEFAULT_NAMESPACE +from dump.match_infra import MatchRequest +from dump.helper import create_template_dict, handle_error +from dump.match_helper import fetch_port_oid, fetch_vlan_oid, fetch_lag_oid +from swsscommon.swsscommon import SonicDBConfig +from .executor import Executor + + +class Interface(Executor): + """ + Debug Dump Plugin for Interface Module. + Interface can be of Ethernet, PortChannel, Loopback, Vlan or SubInterface type + Human readable intf string names are supported + """ + ARG_NAME = "intf_name" + + def __init__(self, match_engine=None): + super().__init__(match_engine) + self.ns = DEFAULT_NAMESPACE + self.intf_type = "" + self.ret_temp = dict() + self.valid_cfg_tables = set(["INTERFACE", + "PORTCHANNEL_INTERFACE", + "VLAN_INTERFACE", + "LOOPBACK_INTERFACE", + "VLAN_SUB_INTERFACE"]) + + def get_all_args(self, ns=DEFAULT_NAMESPACE): + """ + Fetch all the interfaces from the valid cfg tables + """ + req = MatchRequest(db="CONFIG_DB", table="*INTERFACE", key_pattern="*", ns=ns) + ret = self.match_engine.fetch(req) + all_intfs = ret["keys"] + filtered_keys = [] + for key in all_intfs: + num_sep = key.count("|") + if num_sep == 1: + filtered_keys.append(key.split("|")[-1]) + return filtered_keys + + def execute(self, params): + self.ret_temp = create_template_dict(dbs=["CONFIG_DB", "APPL_DB", "STATE_DB", "ASIC_DB"]) + self.intf_name = params[Interface.ARG_NAME] + self.ns = params["namespace"] + # CONFIG_DB + self.intf_type = self.init_intf_config_info() + # APPL_DB + self.init_intf_appl_info() + # STATE_DB + self.init_intf_state_info() + # ASIC_DB + self.init_intf_asic_info() + return self.ret_temp + + def get_sep(self, db): + return SonicDBConfig.getSeparator(db) + + def add_intf_keys(self, db_name, table_name): + # Fetch Interface Keys + req = MatchRequest(db=db_name, table=table_name, key_pattern=self.intf_name, ns=self.ns) + ret = self.match_engine.fetch(req) + self.add_to_ret_template(req.table, req.db, ret["keys"], ret["error"]) + # Fetch IP & Interface Related keys + req = MatchRequest(db=db_name, table=table_name, key_pattern=self.intf_name+self.get_sep(db_name)+"*", ns=self.ns) + ret = self.match_engine.fetch(req) + self.add_to_ret_template(req.table, req.db, ret["keys"], ret["error"], False) + + def init_intf_config_info(self): + intf_table_name = get_interface_table_name(self.intf_name) + if not intf_table_name: + self.ret_temp["CONFIG_DB"]["tables_not_found"].extend(list(self.valid_cfg_tables)) + else: + self.add_intf_keys("CONFIG_DB", intf_table_name) + return intf_table_name + + def init_intf_appl_info(self): + self.add_intf_keys("APPL_DB", "INTF_TABLE") + + def init_intf_state_info(self): + self.add_intf_keys("STATE_DB", "INTERFACE_TABLE") + + def init_intf_asic_info(self): + """ + Fetch SAI_OBJECT_TYPE_ROUTER_INTERFACE ASIC Object for the corresponding interface + To find the relevant ASIC RIF object, this method would need the following: + 1) INTERFACE - SAI_OBJECT_TYPE_PORT oid + 2) PORTCHANNEL - SAI_OBJECT_TYPE_LAG oid + 3) VLAN - SAI_OBJECT_TYPE_VLAN + 4) SUB_INTERFACE - SAI_OBJECT_TYPE_PORT/SAI_OBJECT_TYPE_LAG & SAI_ROUTER_INTERFACE_ATTR_OUTER_VLAN_ID + 
""" + rif_obj = RIF.initialize(self) + rif_obj.collect() + return + +class RIF(object): + """ + Base Class for RIF type + """ + @staticmethod + def initialize(intf_obj): + if intf_obj.intf_type == "INTERFACE": + return PortRIF(intf_obj) + elif intf_obj.intf_type == "PORTCHANNEL_INTERFACE": + return LagRIF(intf_obj) + elif intf_obj.intf_type == "VLAN_INTERFACE": + return VlanRIF(intf_obj) + elif intf_obj.intf_type == "LOOPBACK_INTERFACE": + return LpbRIF(intf_obj) + elif intf_obj.intf_type == "VLAN_SUB_INTERFACE": + return SubIntfRif(intf_obj) + return RIF(intf_obj) + + def __init__(self, intf_obj): + self.intf = intf_obj + + def fetch_rif_keys_using_port_oid(self, port_oid, rfs=["SAI_ROUTER_INTERFACE_ATTR_TYPE"]): + if not port_oid: + port_oid = "INVALID" + req = MatchRequest(db="ASIC_DB", table="ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE", key_pattern="*", field="SAI_ROUTER_INTERFACE_ATTR_PORT_ID", + value=port_oid, return_fields=rfs, ns=self.intf.ns) + ret = self.intf.match_engine.fetch(req) + return req, ret + + def verify_valid_rif_type(self, ret, exp_rif_type=""): + if not ret or not exp_rif_type: + return True, "" + + rif_type = "" + if not ret["error"] and ret["keys"]: + rif_key = ret["keys"][-1] + rif_type = ret.get("return_values", {}).get(rif_key, {}).get("SAI_ROUTER_INTERFACE_ATTR_TYPE", "") + + if rif_type == exp_rif_type: + return True, rif_type + else: + return False, rif_type + + def sanity_check_rif_type(self, ret, rif_oid, exp_type, str_name): + # Sanity check to see if the TYPE is SAI_ROUTER_INTERFACE_TYPE_PORT + _, recv_type = self.verify_valid_rif_type(ret, exp_type) + if exp_type != recv_type: + err_str = "TYPE Mismatch on SAI_OBJECT_TYPE_ROUTER_INTERFACE, {} oid:{}, expected type:{}, recieved type:{}" + handle_error(err_str.format(str_name, rif_oid, exp_type, recv_type), False) + return + + def collect(self): + self.intf.ret_temp["ASIC_DB"]["tables_not_found"].extend(["ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE"]) + return + + +class LpbRIF(RIF): + """ + Handler for Loopback Interface + """ + def collect(self): + # When an ip is added to Loopback interface, + # no ROUTER_INTERFACE asic obj is created, so skipping it + # and not adding to tables not found + return + + +class PortRIF(RIF): + """ + Handler for Port type Obj + """ + def collect(self): + # Get port oid from port name + _, port_oid, _ = fetch_port_oid(self.intf.match_engine, self.intf.intf_name, self.intf.ns) + # Use Port oid to get the RIF + req, ret = self.fetch_rif_keys_using_port_oid(port_oid) + rif_oids = self.intf.add_to_ret_template(req.table, req.db, ret["keys"], ret["error"]) + if rif_oids: + # Sanity check to see if the TYPE is SAI_ROUTER_INTERFACE_TYPE_PORT + exp_type = "SAI_ROUTER_INTERFACE_TYPE_PORT" + self.sanity_check_rif_type(ret, rif_oids[-1], exp_type, "PORT") + + +class VlanRIF(RIF): + """ + Handler for Vlan type Obj + """ + def fetch_rif_keys_using_vlan_oid(self, vlan_oid): + if not vlan_oid: + vlan_oid = "INVALID" + req = MatchRequest(db="ASIC_DB", table="ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE", key_pattern="*", field="SAI_ROUTER_INTERFACE_ATTR_VLAN_ID", + value=vlan_oid, return_fields=["SAI_ROUTER_INTERFACE_ATTR_TYPE"], ns=self.intf.ns) + ret = self.intf.match_engine.fetch(req) + return req, ret + + def collect(self): + # Get vlan oid from vlan name + _, vlan_oid, _ = fetch_vlan_oid(self.intf.match_engine, self.intf.intf_name, self.intf.ns) + # Use vlan oid to get the RIF + req, ret = self.fetch_rif_keys_using_vlan_oid(vlan_oid) + rif_oids = 
self.intf.add_to_ret_template(req.table, req.db, ret["keys"], ret["error"]) + if rif_oids: + # Sanity check to see if the TYPE is SAI_ROUTER_INTERFACE_TYPE_VLAN + exp_type = "SAI_ROUTER_INTERFACE_TYPE_VLAN" + self.sanity_check_rif_type(ret, rif_oids[-1], exp_type, "VLAN") + + +class LagRIF(RIF): + """ + Handler for PortChannel/LAG type Obj + """ + def collect(self): + # Get lag oid from lag name + lag_oid = fetch_lag_oid(self.intf.match_engine, self.intf.intf_name, self.intf.ns) + # Use vlan oid to get the RIF + req, ret = self.fetch_rif_keys_using_port_oid(lag_oid) + rif_oids = self.intf.add_to_ret_template(req.table, req.db, ret["keys"], ret["error"]) + if rif_oids: + # Sanity check to see if the TYPE is SAI_ROUTER_INTERFACE_TYPE_PORT + exp_type = "SAI_ROUTER_INTERFACE_TYPE_PORT" + self.sanity_check_rif_type(ret, rif_oids[-1], exp_type, "LAG") + + +class SubIntfRif(RIF): + """ + Handler for PortChannel/LAG type Obj + """ + def fetch_vlan_id_subintf(self, sub_intf): + req = MatchRequest(db="CONFIG_DB", table="VLAN_SUB_INTERFACE", key_pattern=sub_intf, return_fields=["vlan"], ns=self.intf.ns) + ret = self.intf.match_engine.fetch(req) + vlan_id = "" + if not ret["error"] and ret["keys"]: + key = ret["keys"][-1] + vlan_id = ret["return_values"].get(key, {}).get("vlan", "") + return vlan_id + + def collect(self): + """ + To match the RIF object, two checks have to be performed, + 1) SAI_ROUTER_INTERFACE_ATTR_PORT_ID + - This can either be SAI_OBJECT_TYPE_PORT or SAI_OBJECT_TYPE_LAG + 2) SAI_ROUTER_INTERFACE_ATTR_OUTER_VLAN_ID + - This will be Vlan Number (uint16) + """ + intf_oid = "" + parent_port, _ = self.intf.intf_name.split(VLAN_SUB_INTERFACE_SEPARATOR) + parent_port = get_intf_longname(parent_port) + vlan_id = self.fetch_vlan_id_subintf(self.intf.intf_name) + if parent_port.startswith("Eth"): + _, intf_oid, _ = fetch_port_oid(self.intf.match_engine, parent_port, self.intf.ns) + else: + intf_oid = fetch_lag_oid(self.intf.match_engine, parent_port, self.intf.ns) + + # Use vlan oid to get the RIF + return_fields = ["SAI_ROUTER_INTERFACE_ATTR_OUTER_VLAN_ID", "SAI_ROUTER_INTERFACE_ATTR_TYPE"] + req, ret = self.fetch_rif_keys_using_port_oid(intf_oid, rfs=return_fields) + + # Search for keys who has SAI_ROUTER_INTERFACE_ATTR_OUTER_VLAN_ID field + filtered_keys = [] + if not ret["error"] and len(ret['keys']) > 0: + for key in ret["keys"]: + rcv_vlan_id = ret.get("return_values", {}).get(key, {}).get("SAI_ROUTER_INTERFACE_ATTR_OUTER_VLAN_ID", "") + if rcv_vlan_id == vlan_id: + filtered_keys.append(key) + break + + rif_oids = self.intf.add_to_ret_template(req.table, req.db, filtered_keys, ret["error"]) + if rif_oids: + exp_type = "SAI_ROUTER_INTERFACE_TYPE_SUB_PORT" + self.sanity_check_rif_type(ret, rif_oids[-1], exp_type, "SUB_INTERFACE") diff --git a/dump/plugins/port.py b/dump/plugins/port.py index f3f74897a3..2cffda2fd7 100644 --- a/dump/plugins/port.py +++ b/dump/plugins/port.py @@ -1,5 +1,6 @@ from dump.match_infra import MatchRequest from dump.helper import create_template_dict +from dump.match_helper import fetch_port_oid from .executor import Executor @@ -45,18 +46,8 @@ def init_state_port_info(self, port_name): self.add_to_ret_template(req.table, req.db, ret["keys"], ret["error"]) def init_asic_hostif_info(self, port_name): - req = MatchRequest(db="ASIC_DB", table="ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF", key_pattern="*", field="SAI_HOSTIF_ATTR_NAME", - value=port_name, return_fields=["SAI_HOSTIF_ATTR_OBJ_ID"], ns=self.ns) - ret = self.match_engine.fetch(req) - asic_port_obj_id = "" - - if 
not ret["error"] and len(ret["keys"]) != 0: - self.ret_temp[req.db]["keys"] = ret["keys"] - sai_hostif_obj_key = ret["keys"][-1] - if sai_hostif_obj_key in ret["return_values"] and "SAI_HOSTIF_ATTR_OBJ_ID" in ret["return_values"][sai_hostif_obj_key]: - asic_port_obj_id = ret["return_values"][sai_hostif_obj_key]["SAI_HOSTIF_ATTR_OBJ_ID"] - else: - self.ret_temp[req.db]["tables_not_found"] = [req.table] + req, asic_port_obj_id, ret = fetch_port_oid(self.match_engine, port_name, self.ns) + self.add_to_ret_template(req.table, req.db, ret["keys"], ret["error"]) return asic_port_obj_id def init_asic_port_info(self, asic_port_obj_id): diff --git a/dump/plugins/portchannel.py b/dump/plugins/portchannel.py index b71c7c4fc3..659412e988 100644 --- a/dump/plugins/portchannel.py +++ b/dump/plugins/portchannel.py @@ -1,5 +1,6 @@ from dump.match_infra import MatchRequest from dump.helper import create_template_dict +from dump.match_helper import fetch_lag_oid from .executor import Executor @@ -25,16 +26,13 @@ def execute(self, params_dict): self.lag_name = params_dict[Portchannel.ARG_NAME] self.ns = params_dict["namespace"] # CONFIG_DB - lag_found = self.init_lag_config_info() - if lag_found: - self.init_lag_member_config_info() + self.init_lag_config_info() # APPL_DB self.init_lag_appl_info() # STATE_DB self.init_lag_state_info() # ASIC_DB - lag_type_objs_asic = self.init_lag_member_type_obj_asic_info() - self.init_lag_asic_info(lag_type_objs_asic) + self.init_lag_asic_info() return self.ret_temp def init_lag_config_info(self): @@ -42,12 +40,6 @@ def init_lag_config_info(self): ret = self.match_engine.fetch(req) return self.add_to_ret_template(req.table, req.db, ret["keys"], ret["error"]) - def init_lag_member_config_info(self): - req = MatchRequest(db="CONFIG_DB", table="PORTCHANNEL_MEMBER", key_pattern=self.lag_name + "|*", ns=self.ns) - ret = self.match_engine.fetch(req) - for key in ret["keys"]: - self.lag_members.add(key.split("|")[-1]) - def init_lag_appl_info(self): req = MatchRequest(db="APPL_DB", table="LAG_TABLE", key_pattern=self.lag_name, ns=self.ns) ret = self.match_engine.fetch(req) @@ -58,47 +50,9 @@ def init_lag_state_info(self): ret = self.match_engine.fetch(req) return self.add_to_ret_template(req.table, req.db, ret["keys"], ret["error"]) - def init_lag_asic_info(self, lag_type_objs_asic): - if len(lag_type_objs_asic) == 0: - self.ret_temp["ASIC_DB"]["tables_not_found"].extend(["ASIC_STATE:SAI_OBJECT_TYPE_LAG"]) - return - for lag_asic_obj in lag_type_objs_asic: - req = MatchRequest(db="ASIC_DB", table="ASIC_STATE:SAI_OBJECT_TYPE_LAG", key_pattern=lag_asic_obj, ns=self.ns) - ret = self.match_engine.fetch(req) - self.add_to_ret_template(req.table, req.db, ret["keys"], ret["error"]) - - def init_lag_member_type_obj_asic_info(self): - """ - Finding the relevant SAI_OBJECT_TYPE_LAG key directly from the ASIC is not possible given a LAG name - Thus, using the members to find SAI_LAG_MEMBER_ATTR_LAG_ID - """ - lag_type_objs_asic = set() - for port_name in self.lag_members: - port_asic_obj = self.get_port_asic_obj(port_name) - if port_asic_obj: - lag_member_key, lag_oid = self.get_lag_and_member_obj(port_asic_obj) - lag_type_objs_asic.add(lag_oid) - return lag_type_objs_asic - - def get_port_asic_obj(self, port_name): - req = MatchRequest(db="ASIC_DB", table="ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF", key_pattern="*", field="SAI_HOSTIF_ATTR_NAME", - value=port_name, return_fields=["SAI_HOSTIF_ATTR_OBJ_ID"], ns=self.ns) - ret = self.match_engine.fetch(req) - asic_port_obj_id = "" - if not 
ret["error"] and ret["keys"]: - sai_hostif_obj_key = ret["keys"][-1] - if sai_hostif_obj_key in ret["return_values"] and "SAI_HOSTIF_ATTR_OBJ_ID" in ret["return_values"][sai_hostif_obj_key]: - asic_port_obj_id = ret["return_values"][sai_hostif_obj_key]["SAI_HOSTIF_ATTR_OBJ_ID"] - return asic_port_obj_id - - def get_lag_and_member_obj(self, port_asic_obj): - req = MatchRequest(db="ASIC_DB", table="ASIC_STATE:SAI_OBJECT_TYPE_LAG_MEMBER", key_pattern="*", field="SAI_LAG_MEMBER_ATTR_PORT_ID", - value=port_asic_obj, return_fields=["SAI_LAG_MEMBER_ATTR_LAG_ID"], ns=self.ns) + def init_lag_asic_info(self): + # Fetch Lag Type Asic Obj from CFG DB given lag name + lag_asic_obj = fetch_lag_oid(self.match_engine, self.lag_name, self.ns) + req = MatchRequest(db="ASIC_DB", table="ASIC_STATE:SAI_OBJECT_TYPE_LAG", key_pattern=lag_asic_obj, ns=self.ns) ret = self.match_engine.fetch(req) - lag_member_key = "" - lag_oid = "" - if not ret["error"] and ret["keys"]: - lag_member_key = ret["keys"][-1] - if lag_member_key in ret["return_values"] and "SAI_LAG_MEMBER_ATTR_LAG_ID" in ret["return_values"][lag_member_key]: - lag_oid = ret["return_values"][lag_member_key]["SAI_LAG_MEMBER_ATTR_LAG_ID"] - return lag_member_key, lag_oid + self.add_to_ret_template(req.table, req.db, ret["keys"], ret["error"]) diff --git a/dump/plugins/vlan.py b/dump/plugins/vlan.py index 6f5c9b90a9..f3a99f4250 100644 --- a/dump/plugins/vlan.py +++ b/dump/plugins/vlan.py @@ -1,6 +1,7 @@ from .executor import Executor from dump.match_infra import MatchRequest from dump.helper import create_template_dict +from dump.match_helper import fetch_vlan_oid class Vlan(Executor): @@ -43,14 +44,5 @@ def init_state_vlan_info(self, vlan_name): self.add_to_ret_template(req.table, req.db, ret["keys"], ret["error"]) def init_asic_vlan_info(self, vlan_name): - # Convert 'Vlanxxx' to 'xxx' - if vlan_name[0:4] != "Vlan" or not vlan_name[4:].isnumeric(): - self.ret_temp["ASIC_DB"]["tables_not_found"] =["ASIC_STATE:SAI_OBJECT_TYPE_VLAN"] - return {}, {} - vlan_num = int(vlan_name[4:]) - - # Find the table named "ASIC_STATE:SAI_OBJECT_TYPE_VLAN:*" in which SAI_VLAN_ATTR_VLAN_ID = vlan_num - req = MatchRequest(db="ASIC_DB", table="ASIC_STATE:SAI_OBJECT_TYPE_VLAN", key_pattern="*", field="SAI_VLAN_ATTR_VLAN_ID", - value=str(vlan_num), ns=self.ns) - ret = self.match_engine.fetch(req) + req, _, ret = fetch_vlan_oid(self.match_engine, vlan_name, self.ns) self.add_to_ret_template(req.table, req.db, ret["keys"], ret["error"]) diff --git a/tests/dump_input/interface/appl_db.json b/tests/dump_input/interface/appl_db.json new file mode 100644 index 0000000000..772cc32332 --- /dev/null +++ b/tests/dump_input/interface/appl_db.json @@ -0,0 +1,58 @@ +{ + "INTF_TABLE:Ethernet16":{ + "NULL": "NULL", + "mac_addr": "00:00:00:00:00:00" + }, + "INTF_TABLE:Ethernet16:3.3.3.1/24": { + "family": "IPv4", + "scope": "global" + }, + "INTF_TABLE:PortChannel1111": { + "NULL": "NULL", + "mac_addr": "00:00:00:00:00:00" + }, + "INTF_TABLE:PortChannel1111:1.1.1.1/24": { + "family": "IPv4", + "scope": "global" + }, + "INTF_TABLE:Vlan10": { + "NULL": "NULL", + "mac_addr": "00:00:00:00:00:00" + }, + "INTF_TABLE:Vlan10:2.2.2.1/24": { + "family": "IPv4", + "scope": "global" + }, + "INTF_TABLE:Eth0.1": { + "admin_status": "up", + "mac_addr": "00:00:00:00:00:00", + "mtu": "9100", + "vlan": "10" + }, + "INTF_TABLE:Eth0.1:9.9.9.9/24": { + "family": "IPv4", + "scope": "global" + }, + "INTF_TABLE:PortChannel1234": { + "NULL": "NULL", + "mac_addr": "00:00:00:00:00:00" + }, + 
"INTF_TABLE:PortChannel1234:7.7.7.1/24": { + "family": "IPv4", + "scope": "global" + }, + "INTF_TABLE:Loopback0": { + "NULL": "NULL", + "mac_addr": "00:00:00:00:00:00" + }, + "INTF_TABLE:Loopback0:10.1.0.1/32": { + "family": "IPv4", + "scope": "global" + }, + "INTF_TABLE:Eth4.1": { + "admin_status": "up", + "mac_addr": "00:00:00:00:00:00", + "mtu": "9100", + "vlan": "100" + } +} \ No newline at end of file diff --git a/tests/dump_input/interface/asic_db.json b/tests/dump_input/interface/asic_db.json new file mode 100644 index 0000000000..bbad0c9cd9 --- /dev/null +++ b/tests/dump_input/interface/asic_db.json @@ -0,0 +1,80 @@ +{ + "ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF:oid:0xd000000000a26": { + "SAI_HOSTIF_ATTR_NAME": "Ethernet16", + "SAI_HOSTIF_ATTR_OBJ_ID": "oid:0x1000000000937", + "SAI_HOSTIF_ATTR_OPER_STATUS": "true", + "SAI_HOSTIF_ATTR_TYPE": "SAI_HOSTIF_TYPE_NETDEV" + }, + "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE:oid:0x6000000000c7c": { + "SAI_ROUTER_INTERFACE_ATTR_MTU": "9100", + "SAI_ROUTER_INTERFACE_ATTR_PORT_ID": "oid:0x1000000000937", + "SAI_ROUTER_INTERFACE_ATTR_SRC_MAC_ADDRESS": "1C:34:DA:1C:9F:00", + "SAI_ROUTER_INTERFACE_ATTR_TYPE": "SAI_ROUTER_INTERFACE_TYPE_PORT", + "SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID": "oid:0x3000000000010" + }, + "ASIC_STATE:SAI_OBJECT_TYPE_VLAN:oid:0x26000000000c78": { + "SAI_VLAN_ATTR_VLAN_ID": "10" + }, + "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE:oid:0x6000000000c7b": { + "SAI_ROUTER_INTERFACE_ATTR_MTU": "9100", + "SAI_ROUTER_INTERFACE_ATTR_SRC_MAC_ADDRESS": "1C:34:DA:1C:9F:00", + "SAI_ROUTER_INTERFACE_ATTR_TYPE": "SAI_ROUTER_INTERFACE_TYPE_VLAN", + "SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID": "oid:0x3000000000010", + "SAI_ROUTER_INTERFACE_ATTR_VLAN_ID": "oid:0x26000000000c78" + }, + "ASIC_STATE:SAI_OBJECT_TYPE_LAG:oid:0x2000000000c73": { + "NULL": "NULL" + }, + "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE:oid:0x6000000000c77": { + "SAI_ROUTER_INTERFACE_ATTR_MTU": "9100", + "SAI_ROUTER_INTERFACE_ATTR_PORT_ID": "oid:0x2000000000c73", + "SAI_ROUTER_INTERFACE_ATTR_SRC_MAC_ADDRESS": "1C:34:DA:1C:9F:00", + "SAI_ROUTER_INTERFACE_ATTR_TYPE": "SAI_ROUTER_INTERFACE_TYPE_PORT", + "SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID": "oid:0x3000000000010" + }, + "ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF:oid:0xd00000000056e": { + "SAI_HOSTIF_ATTR_NAME": "Ethernet0", + "SAI_HOSTIF_ATTR_OBJ_ID": "oid:0x1000000000528", + "SAI_HOSTIF_ATTR_OPER_STATUS": "true", + "SAI_HOSTIF_ATTR_TYPE": "SAI_HOSTIF_TYPE_NETDEV", + "SAI_HOSTIF_ATTR_VLAN_TAG": "SAI_HOSTIF_VLAN_TAG_KEEP" + }, + "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE:oid:0x60000000005e9": { + "SAI_ROUTER_INTERFACE_ATTR_ADMIN_V4_STATE": "true", + "SAI_ROUTER_INTERFACE_ATTR_ADMIN_V6_STATE": "true", + "SAI_ROUTER_INTERFACE_ATTR_MTU": "9100", + "SAI_ROUTER_INTERFACE_ATTR_OUTER_VLAN_ID": "10", + "SAI_ROUTER_INTERFACE_ATTR_PORT_ID": "oid:0x1000000000528", + "SAI_ROUTER_INTERFACE_ATTR_SRC_MAC_ADDRESS": "7C:FE:90:F5:36:40", + "SAI_ROUTER_INTERFACE_ATTR_TYPE": "SAI_ROUTER_INTERFACE_TYPE_SUB_PORT", + "SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID": "oid:0x3000000000002" + }, + "ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF:oid:0xd00000000058c": { + "SAI_HOSTIF_ATTR_NAME": "Ethernet120", + "SAI_HOSTIF_ATTR_OBJ_ID": "oid:0x1000000000318", + "SAI_HOSTIF_ATTR_OPER_STATUS": "false", + "SAI_HOSTIF_ATTR_TYPE": "SAI_HOSTIF_TYPE_NETDEV" + }, + "ASIC_STATE:SAI_OBJECT_TYPE_LAG_MEMBER:oid:0x1b0000000005eb": { + "SAI_LAG_MEMBER_ATTR_EGRESS_DISABLE": "true", + "SAI_LAG_MEMBER_ATTR_INGRESS_DISABLE": "true", + "SAI_LAG_MEMBER_ATTR_LAG_ID": 
"oid:0x200000000058e", + "SAI_LAG_MEMBER_ATTR_PORT_ID": "oid:0x1000000000318" + }, + "ASIC_STATE:SAI_OBJECT_TYPE_LAG:oid:0x200000000058e": { + "NULL": "NULL" + }, + "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE:oid:0x60000000005ec": { + "SAI_ROUTER_INTERFACE_ATTR_MTU": "9100", + "SAI_ROUTER_INTERFACE_ATTR_PORT_ID": "oid:0x200000000058e", + "SAI_ROUTER_INTERFACE_ATTR_SRC_MAC_ADDRESS": "7C:FE:90:F5:36:40", + "SAI_ROUTER_INTERFACE_ATTR_TYPE": "SAI_ROUTER_INTERFACE_TYPE_PORT", + "SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID": "oid:0x3000000000002" + }, + "ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF:oid:0xd00000000056f": { + "SAI_HOSTIF_ATTR_NAME": "Ethernet4", + "SAI_HOSTIF_ATTR_OBJ_ID": "oid:0x1000000000529", + "SAI_HOSTIF_ATTR_OPER_STATUS": "true", + "SAI_HOSTIF_ATTR_TYPE": "SAI_HOSTIF_TYPE_NETDEV" + } +} diff --git a/tests/dump_input/interface/config_db.json b/tests/dump_input/interface/config_db.json new file mode 100644 index 0000000000..afeaccad9f --- /dev/null +++ b/tests/dump_input/interface/config_db.json @@ -0,0 +1,49 @@ +{ + "INTERFACE|Ethernet16": { + "NULL" : "NULL" + }, + "INTERFACE|Ethernet16|3.3.3.1/24": { + "NULL" : "NULL" + }, + "VLAN_INTERFACE|Vlan10": { + "NULL" : "NULL" + }, + "VLAN_INTERFACE|Vlan10|2.2.2.1/24": { + "NULL" : "NULL" + }, + "PORTCHANNEL_INTERFACE|PortChannel1111": { + "NULL": "NULL" + }, + "PORTCHANNEL_INTERFACE|PortChannel1111|1.1.1.1/24": { + "NULL": "NULL" + }, + "PORTCHANNEL_INTERFACE|PortChannel1234": { + "NULL": "NULL" + }, + "PORTCHANNEL_INTERFACE|PortChannel1234|7.7.7.1/24": { + "NULL": "NULL" + }, + "PORTCHANNEL_MEMBER|PortChannel1234|Ethernet120": { + "NULL": "NULL" + }, + "PORTCHANNEL_MEMBER|PortChannel1234|Ethernet124": { + "NULL": "NULL" + }, + "VLAN_SUB_INTERFACE|Eth0.1": { + "admin_status": "up", + "vlan": "10" + }, + "VLAN_SUB_INTERFACE|Eth0.1|9.9.9.9/24": { + "NULL": "NULL" + }, + "LOOPBACK_INTERFACE|Loopback0": { + "NULL": "NULL" + }, + "LOOPBACK_INTERFACE|Loopback0|10.1.0.1/32": { + "NULL": "NULL" + }, + "VLAN_SUB_INTERFACE|Eth4.1": { + "admin_status": "up", + "vlan": "100" + } +} diff --git a/tests/dump_input/interface/state_db.json b/tests/dump_input/interface/state_db.json new file mode 100644 index 0000000000..4d7d9e5d1f --- /dev/null +++ b/tests/dump_input/interface/state_db.json @@ -0,0 +1,38 @@ +{ + "INTERFACE_TABLE|Ethernet16": { + "vrf": "" + }, + "INTERFACE_TABLE|Ethernet16|3.3.3.1/24": { + "state": "ok" + }, + "INTERFACE_TABLE|PortChannel1111": { + "vrf": "" + }, + "INTERFACE_TABLE|PortChannel1111|1.1.1.1/24": { + "state": "ok" + }, + "INTERFACE_TABLE|Vlan10": { + "vrf": "" + }, + "INTERFACE_TABLE|Vlan10|2.2.2.1/24": { + "state": "ok" + }, + "INTERFACE_TABLE|Eth0.1": { + "vrf": "" + }, + "INTERFACE_TABLE|Eth0.1|9.9.9.9/24": { + "state": "ok" + }, + "INTERFACE_TABLE|PortChannel1234": { + "vrf": "" + }, + "INTERFACE_TABLE|PortChannel1234|7.7.7.1/24": { + "state": "ok" + }, + "INTERFACE_TABLE|Loopback0": { + "vrf": "" + }, + "INTERFACE_TABLE|Loopback0|10.1.0.1/32": { + "state": "ok" + } +} diff --git a/tests/dump_tests/module_tests/interface_test.py b/tests/dump_tests/module_tests/interface_test.py new file mode 100644 index 0000000000..969272b069 --- /dev/null +++ b/tests/dump_tests/module_tests/interface_test.py @@ -0,0 +1,207 @@ +import json +import os +import sys +import jsonpatch +import unittest +import pytest +from deepdiff import DeepDiff +from mock import patch +from dump.helper import create_template_dict, sort_lists, populate_mock +from dump.plugins.interface import Interface +from dump.match_infra import MatchEngine, ConnectionPool 
+from swsscommon.swsscommon import SonicV2Connector + +# Location for dedicated db's used for UT +module_tests_path = os.path.dirname(__file__) +dump_tests_path = os.path.join(module_tests_path, "../") +tests_path = os.path.join(dump_tests_path, "../") +dump_test_input = os.path.join(tests_path, "dump_input") +port_files_path = os.path.join(dump_test_input, "interface") + +# Define the mock files to read from +dedicated_dbs = {} +dedicated_dbs['CONFIG_DB'] = os.path.join(port_files_path, "config_db.json") +dedicated_dbs['APPL_DB'] = os.path.join(port_files_path, "appl_db.json") +dedicated_dbs['ASIC_DB'] = os.path.join(port_files_path, "asic_db.json") +dedicated_dbs['STATE_DB'] = os.path.join(port_files_path, "state_db.json") + + +@pytest.fixture(scope="class", autouse=True) +def match_engine(): + + print("SETUP") + os.environ["VERBOSE"] = "1" + + # Monkey Patch the SonicV2Connector Object + from ...mock_tables import dbconnector + db = SonicV2Connector() + + # popualate the db with mock data + db_names = list(dedicated_dbs.keys()) + try: + populate_mock(db, db_names, dedicated_dbs) + except Exception as e: + assert False, "Mock initialization failed: " + str(e) + + # Initialize connection pool + conn_pool = ConnectionPool() + DEF_NS = '' # Default Namespace + conn_pool.cache = {DEF_NS: {'conn': db, + 'connected_to': set(db_names)}} + + # Initialize match_engine + match_engine = MatchEngine(conn_pool) + yield match_engine + print("TEARDOWN") + os.environ["VERBOSE"] = "0" + +@pytest.mark.usefixtures("match_engine") +class TestInterfaceModule: + def test_port_type_interface(self, match_engine): + """ + Scenario: Test the flow fetching objs related to PORT_TYPE interfac + """ + params = {Interface.ARG_NAME: "Ethernet16", "namespace": ""} + m_intf = Interface(match_engine) + returned = m_intf.execute(params) + expect = create_template_dict(dbs=["CONFIG_DB", "APPL_DB", "ASIC_DB", "STATE_DB"]) + expect["CONFIG_DB"]["keys"].extend(["INTERFACE|Ethernet16", "INTERFACE|Ethernet16|3.3.3.1/24"]) + expect["APPL_DB"]["keys"].extend(["INTF_TABLE:Ethernet16", "INTF_TABLE:Ethernet16:3.3.3.1/24"]) + expect["STATE_DB"]["keys"].extend(["INTERFACE_TABLE|Ethernet16", "INTERFACE_TABLE|Ethernet16|3.3.3.1/24"]) + expect["ASIC_DB"]["keys"].append("ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE:oid:0x6000000000c7c") + ddiff = DeepDiff(sort_lists(returned), sort_lists(expect), ignore_order=True) + assert not ddiff, ddiff + + def test_vlan_type_interface(self, match_engine): + """ + Scenario: Test the flow fetching objs related to VLAN_TYPE interfac + """ + params = {Interface.ARG_NAME: "Vlan10", "namespace": ""} + m_intf = Interface(match_engine) + returned = m_intf.execute(params) + expect = create_template_dict(dbs=["CONFIG_DB", "APPL_DB", "ASIC_DB", "STATE_DB"]) + expect["CONFIG_DB"]["keys"].extend(["VLAN_INTERFACE|Vlan10", "VLAN_INTERFACE|Vlan10|2.2.2.1/24"]) + expect["APPL_DB"]["keys"].extend(["INTF_TABLE:Vlan10", "INTF_TABLE:Vlan10:2.2.2.1/24"]) + expect["STATE_DB"]["keys"].extend(["INTERFACE_TABLE|Vlan10", "INTERFACE_TABLE|Vlan10|2.2.2.1/24"]) + expect["ASIC_DB"]["keys"].append("ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE:oid:0x6000000000c7b") + ddiff = DeepDiff(sort_lists(returned), sort_lists(expect), ignore_order=True) + assert not ddiff, ddiff + + def test_lag_type_interface_no_members(self, match_engine): + """ + Scenario: Test the flow fetching objs related to LAG_TYPE iface without members + """ + params = {Interface.ARG_NAME: "PortChannel1111", "namespace": ""} + m_intf = Interface(match_engine) + returned 
= m_intf.execute(params) + expect = create_template_dict(dbs=["CONFIG_DB", "APPL_DB", "ASIC_DB", "STATE_DB"]) + expect["CONFIG_DB"]["keys"].extend(["PORTCHANNEL_INTERFACE|PortChannel1111", "PORTCHANNEL_INTERFACE|PortChannel1111|1.1.1.1/24"]) + expect["APPL_DB"]["keys"].extend(["INTF_TABLE:PortChannel1111", "INTF_TABLE:PortChannel1111:1.1.1.1/24"]) + expect["STATE_DB"]["keys"].extend(["INTERFACE_TABLE|PortChannel1111", "INTERFACE_TABLE|PortChannel1111|1.1.1.1/24"]) + expect["ASIC_DB"]["tables_not_found"].append("ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE") + ddiff = DeepDiff(sort_lists(returned), sort_lists(expect), ignore_order=True) + assert not ddiff, ddiff + + def test_lag_type_interface(self, match_engine): + """ + Scenario: Test the flow fetching objs related to LAG_TYPE iface + """ + params = {Interface.ARG_NAME: "PortChannel1234", "namespace": ""} + m_intf = Interface(match_engine) + returned = m_intf.execute(params) + expect = create_template_dict(dbs=["CONFIG_DB", "APPL_DB", "ASIC_DB", "STATE_DB"]) + expect["CONFIG_DB"]["keys"].extend(["PORTCHANNEL_INTERFACE|PortChannel1234", "PORTCHANNEL_INTERFACE|PortChannel1234|7.7.7.1/24"]) + expect["APPL_DB"]["keys"].extend(["INTF_TABLE:PortChannel1234", "INTF_TABLE:PortChannel1234:7.7.7.1/24"]) + expect["STATE_DB"]["keys"].extend(["INTERFACE_TABLE|PortChannel1234", "INTERFACE_TABLE|PortChannel1234|7.7.7.1/24"]) + expect["ASIC_DB"]["keys"].append("ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE:oid:0x60000000005ec") + ddiff = DeepDiff(sort_lists(returned), sort_lists(expect), ignore_order=True) + assert not ddiff, ddiff + + def test_subintf_type_interface(self, match_engine): + """ + Scenario: Test the flow fetching objs related to Sub-Interface iface + """ + params = {Interface.ARG_NAME: "Eth0.1", "namespace": ""} + m_intf = Interface(match_engine) + returned = m_intf.execute(params) + expect = create_template_dict(dbs=["CONFIG_DB", "APPL_DB", "ASIC_DB", "STATE_DB"]) + expect["CONFIG_DB"]["keys"].extend(["VLAN_SUB_INTERFACE|Eth0.1", "VLAN_SUB_INTERFACE|Eth0.1|9.9.9.9/24"]) + expect["APPL_DB"]["keys"].extend(["INTF_TABLE:Eth0.1", "INTF_TABLE:Eth0.1:9.9.9.9/24"]) + expect["STATE_DB"]["keys"].extend(["INTERFACE_TABLE|Eth0.1", "INTERFACE_TABLE|Eth0.1|9.9.9.9/24"]) + expect["ASIC_DB"]["keys"].append("ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE:oid:0x60000000005e9") + ddiff = DeepDiff(sort_lists(returned), sort_lists(expect), ignore_order=True) + assert not ddiff, ddiff + + def test_no_interface(self, match_engine): + """ + Scenario: Test the flow fetching objs related to an interface which is not present + """ + params = {Interface.ARG_NAME: "Ethernet160", "namespace": ""} + m_intf = Interface(match_engine) + returned = m_intf.execute(params) + expect = create_template_dict(dbs=["CONFIG_DB", "APPL_DB", "ASIC_DB", "STATE_DB"]) + expect["CONFIG_DB"]["tables_not_found"].extend(["INTERFACE"]) + expect["APPL_DB"]["tables_not_found"].extend(["INTF_TABLE"]) + expect["STATE_DB"]["tables_not_found"].extend(["INTERFACE_TABLE"]) + expect["ASIC_DB"]["tables_not_found"].append("ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE") + ddiff = DeepDiff(sort_lists(returned), sort_lists(expect), ignore_order=True) + assert not ddiff, ddiff + + def test_invalid_interface(self, match_engine): + """ + Scenario: Test the flow fetching objs related to an interface which is invalid + """ + params = {Interface.ARG_NAME: "Whatever", "namespace": ""} + m_intf = Interface(match_engine) + returned = m_intf.execute(params) + expect = create_template_dict(dbs=["CONFIG_DB", "APPL_DB", 
"ASIC_DB", "STATE_DB"]) + expect["CONFIG_DB"]["tables_not_found"].extend(["INTERFACE", + "PORTCHANNEL_INTERFACE", + "VLAN_INTERFACE", + "VLAN_SUB_INTERFACE", + "LOOPBACK_INTERFACE"]) + expect["APPL_DB"]["tables_not_found"].extend(["INTF_TABLE"]) + expect["STATE_DB"]["tables_not_found"].extend(["INTERFACE_TABLE"]) + expect["ASIC_DB"]["tables_not_found"].append("ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE") + ddiff = DeepDiff(sort_lists(returned), sort_lists(expect), ignore_order=True) + assert not ddiff, ddiff + + def test_loopback_interface(self, match_engine): + """ + Scenario: Test the flow fetching objs related to loopback iface + """ + params = {Interface.ARG_NAME: "Loopback0", "namespace": ""} + m_intf = Interface(match_engine) + returned = m_intf.execute(params) + expect = create_template_dict(dbs=["CONFIG_DB", "APPL_DB", "ASIC_DB", "STATE_DB"]) + expect["CONFIG_DB"]["keys"].extend(["LOOPBACK_INTERFACE|Loopback0", "LOOPBACK_INTERFACE|Loopback0|10.1.0.1/32"]) + expect["APPL_DB"]["keys"].extend(["INTF_TABLE:Loopback0", "INTF_TABLE:Loopback0:10.1.0.1/32"]) + expect["STATE_DB"]["keys"].extend(["INTERFACE_TABLE|Loopback0", "INTERFACE_TABLE|Loopback0|10.1.0.1/32"]) + ddiff = DeepDiff(sort_lists(returned), sort_lists(expect), ignore_order=True) + assert not ddiff, ddiff + + def test_subintf_with_invalid_vlan(self, match_engine): + """ + Scenario: Test the flow fetching objs related to a subintf with invalid vlan + """ + params = {Interface.ARG_NAME: "Eth4.1", "namespace": ""} + m_intf = Interface(match_engine) + returned = m_intf.execute(params) + expect = create_template_dict(dbs=["CONFIG_DB", "APPL_DB", "ASIC_DB", "STATE_DB"]) + expect["CONFIG_DB"]["keys"].extend(["VLAN_SUB_INTERFACE|Eth4.1"]) + expect["APPL_DB"]["keys"].extend(["INTF_TABLE:Eth4.1"]) + expect["STATE_DB"]["tables_not_found"].extend(["INTERFACE_TABLE"]) + expect["ASIC_DB"]["tables_not_found"].extend(["ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE"]) + ddiff = DeepDiff(sort_lists(returned), sort_lists(expect), ignore_order=True) + assert not ddiff, ddiff + + def test_all_args(self, match_engine): + """ + Scenario: Verify Whether the get_all_args method is working as expected + """ + params = {} + m_port = Interface(match_engine) + returned = m_port.get_all_args("") + expect = ["Ethernet16", "Vlan10", "PortChannel1111", "PortChannel1234", "Eth0.1", "Loopback0", "Eth4.1"] + ddiff = DeepDiff(expect, returned, ignore_order=True) + assert not ddiff, ddiff From c7524572abfd99eb8e64a0981b2cee0e4f337867 Mon Sep 17 00:00:00 2001 From: Nazarii Hnydyn Date: Wed, 30 Mar 2022 15:21:00 +0300 Subject: [PATCH 03/11] [PBH] Implement Edit Flows (#2093) PBH Edit Flows is a second phase of PBH feature implementation. It allows user to modify the already existing objects. PBH Edit Flows offer a full entity update which assumes Config DB field ADD/UPDATE/REMOVE processing. 
HLD: Azure/SONiC#909 - What I did Implemented Edit Flows in scope of PBH enhancement - How I did it Implementation is done according to the PBH HLD Signed-off-by: Nazarii Hnydyn --- clear/main.py | 10 +- config/plugins/pbh.py | 1134 ++++++++++++++++++------- show/plugins/pbh.py | 58 +- tests/mock_tables/dbconnector.py | 38 +- tests/pbh_input/assert_show_output.py | 17 + tests/pbh_input/state_db.json | 26 + tests/pbh_test.py | 260 +++++- utilities_common/db.py | 4 +- 8 files changed, 1181 insertions(+), 366 deletions(-) create mode 100644 tests/pbh_input/state_db.json diff --git a/clear/main.py b/clear/main.py index 3ba0a1d735..1ad42ad786 100755 --- a/clear/main.py +++ b/clear/main.py @@ -4,10 +4,10 @@ import sys import click import utilities_common.cli as clicommon -import json from utilities_common import util_base from show.plugins.pbh import read_pbh_counters +from config.plugins.pbh import serialize_pbh_counters from . import plugins @@ -473,14 +473,8 @@ def statistics(db): pbh_rules = db.cfgdb.get_table("PBH_RULE") pbh_counters = read_pbh_counters(pbh_rules) - try: - with open('/tmp/.pbh_counters.txt', 'w') as fp: - json.dump(remap_keys(pbh_counters), fp) - except IOError as err: - pass + serialize_pbh_counters(pbh_counters) -def remap_keys(dict): - return [{'key': k, 'value': v} for k, v in dict.items()] # ("sonic-clear flowcnt-trap") @cli.command() diff --git a/config/plugins/pbh.py b/config/plugins/pbh.py index e5e5f0fdde..b6726aa154 100644 --- a/config/plugins/pbh.py +++ b/config/plugins/pbh.py @@ -7,73 +7,154 @@ """ import click +import json import ipaddress import re import utilities_common.cli as clicommon -hash_field_types = [ - 'INNER_IP_PROTOCOL', - 'INNER_L4_DST_PORT', - 'INNER_L4_SRC_PORT', - 'INNER_DST_IPV4', - 'INNER_SRC_IPV4', - 'INNER_DST_IPV6', - 'INNER_SRC_IPV6' +from show.plugins.pbh import deserialize_pbh_counters + +GRE_KEY_RE = r"^(0x){1}[a-fA-F0-9]{1,8}/(0x){1}[a-fA-F0-9]{1,8}$" + +ETHER_TYPE_RE = r"^(0x){1}[a-fA-F0-9]{1,4}$" +L4_DST_PORT_RE = ETHER_TYPE_RE +INNER_ETHER_TYPE_RE = ETHER_TYPE_RE + +IP_PROTOCOL_RE = r"^(0x){1}[a-fA-F0-9]{1,2}$" +IPV6_NEXT_HEADER_RE = IP_PROTOCOL_RE + +HASH_FIELD_VALUE_LIST = [ + "INNER_IP_PROTOCOL", + "INNER_L4_DST_PORT", + "INNER_L4_SRC_PORT", + "INNER_DST_IPV4", + "INNER_SRC_IPV4", + "INNER_DST_IPV6", + "INNER_SRC_IPV6" ] -packet_action_types = ['SET_ECMP_HASH', 'SET_LAG_HASH'] -flow_counter_state = ['DISABLED', 'ENABLED'] -gre_key_re = r"^(0x){1}[a-fA-F0-9]{1,8}/(0x){1}[a-fA-F0-9]{1,8}$" -ip_protocol_re = r"^(0x){1}[a-fA-F0-9]{1,2}$" -ipv6_next_header_re = ip_protocol_re -l4_dst_port_re = r"^(0x){1}[a-fA-F0-9]{1,4}$" -inner_ether_type_re = l4_dst_port_re -ether_type_re = l4_dst_port_re +PACKET_ACTION_VALUE_LIST = [ + "SET_ECMP_HASH", + "SET_LAG_HASH" +] -pbh_hash_field_tbl_name = 'PBH_HASH_FIELD' -pbh_hash_tbl_name = 'PBH_HASH' -pbh_table_tbl_name = 'PBH_TABLE' +FLOW_COUNTER_VALUE_LIST = [ + "DISABLED", + "ENABLED" +] +PBH_TABLE_CDB = "PBH_TABLE" +PBH_RULE_CDB = "PBH_RULE" +PBH_HASH_CDB = "PBH_HASH" +PBH_HASH_FIELD_CDB = "PBH_HASH_FIELD" -def exit_with_error(*args, **kwargs): - """ Print a message and abort CLI. 
""" +PBH_TABLE_INTERFACE_LIST = "interface_list" +PBH_TABLE_DESCRIPTION = "description" - click.secho(*args, **kwargs) - raise click.Abort() +PBH_RULE_PRIORITY = "priority" +PBH_RULE_GRE_KEY = "gre_key" +PBH_RULE_ETHER_TYPE = "ether_type" +PBH_RULE_IP_PROTOCOL = "ip_protocol" +PBH_RULE_IPV6_NEXT_HEADER = "ipv6_next_header" +PBH_RULE_L4_DST_PORT = "l4_dst_port" +PBH_RULE_INNER_ETHER_TYPE = "inner_ether_type" +PBH_RULE_HASH = "hash" +PBH_RULE_PACKET_ACTION = "packet_action" +PBH_RULE_FLOW_COUNTER = "flow_counter" + +PBH_HASH_HASH_FIELD_LIST = "hash_field_list" + +PBH_HASH_FIELD_HASH_FIELD = "hash_field" +PBH_HASH_FIELD_IP_MASK = "ip_mask" +PBH_HASH_FIELD_SEQUENCE_ID = "sequence_id" + +PBH_CAPABILITIES_SDB = "PBH_CAPABILITIES" + +PBH_TABLE_CAPABILITIES_KEY = "table" +PBH_RULE_CAPABILITIES_KEY = "rule" +PBH_HASH_CAPABILITIES_KEY = "hash" +PBH_HASH_FIELD_CAPABILITIES_KEY = "hash-field" + +PBH_ADD = "ADD" +PBH_UPDATE = "UPDATE" +PBH_REMOVE = "REMOVE" + +PBH_COUNTERS_LOCATION = "/tmp/.pbh_counters.txt" +# +# DB interface -------------------------------------------------------------------------------------------------------- +# def add_entry(db, table, key, data): """ Add new entry in table """ cfg = db.get_config() cfg.setdefault(table, {}) + if key in cfg[table]: - raise Exception("{} already exists".format(key)) + raise click.ClickException("{}{}{} already exists in Config DB".format( + table, db.TABLE_NAME_SEPARATOR, db.serialize_key(key) + ) + ) cfg[table][key] = data db.set_entry(table, key, data) -def update_entry(db, table, key, data, create_if_not_exists=False): +def update_entry(db, cap, table, key, data): """ Update entry in table and validate configuration. - If attribute value in data is None, the attribute is deleted. + If field value in data is None, the field is deleted """ + field_root = "{}{}{}".format(table, db.TABLE_NAME_SEPARATOR, db.serialize_key(key)) + cfg = db.get_config() cfg.setdefault(table, {}) - if create_if_not_exists: - cfg[table].setdefault(key, {}) - if key not in cfg[table]: - raise Exception("{} does not exist".format(key)) - - for attr, value in data.items(): - if value is None and attr in cfg[table][key]: - cfg[table][key].pop(attr) - else: - cfg[table][key][attr] = value + raise click.ClickException("{} doesn't exist in Config DB".format(field_root)) + + for field, value in data.items(): + if field not in cap: + raise click.ClickException( + "{}{}{} doesn't have a configuration capabilities".format( + field_root, db.KEY_SEPARATOR, field + ) + ) + if value is None: # HDEL + if field in cfg[table][key]: + if PBH_REMOVE in cap[field]: + cfg[table][key].pop(field) + else: + raise click.ClickException( + "Failed to remove {}{}{}: operation is prohibited".format( + field_root, db.KEY_SEPARATOR, field + ) + ) + else: + raise click.ClickException( + "Failed to remove {}{}{}: field doesn't exist".format( + field_root, db.KEY_SEPARATOR, field + ) + ) + else: # HSET + if field in cfg[table][key]: + if PBH_UPDATE not in cap[field]: + raise click.ClickException( + "Failed to update {}{}{}: operation is prohibited".format( + field_root, db.KEY_SEPARATOR, field + ) + ) + else: + if PBH_ADD not in cap[field]: + raise click.ClickException( + "Failed to add {}{}{}: operation is prohibited".format( + field_root, db.KEY_SEPARATOR, field + ) + ) + + cfg[table][key][field] = value db.set_entry(table, key, cfg[table][key]) @@ -83,41 +164,124 @@ def del_entry(db, table, key): cfg = db.get_config() cfg.setdefault(table, {}) + if key not in cfg[table]: - raise Exception("{} does not 
exist".format(key)) + raise click.ClickException("{}{}{} doesn't exist in Config DB".format( + table, db.TABLE_NAME_SEPARATOR, db.serialize_key(key) + ) + ) cfg[table].pop(key) db.set_entry(table, key, None) -def ip_address_validator(ctx, param, value): - """ Check if the given ip address is valid +def is_exist_in_db(db, table, key): + """ Check if provided hash already exists in Config DB Args: - ctx: click context, - param: click parameter context, - value: value of parameter + db: reference to Config DB + table: table to search in Config DB + key: key to search in Config DB Returns: - str: ip address + bool: The return value. True for success, False otherwise """ - if value is not None: - try: - ip = ipaddress.ip_address(value) - except Exception as e: - exit_with_error("Error: invalid value '{}' for '{}' option\n{}".format(value, param.name, e), fg="red") + if (not table) or (not key): + return False - return str(ip) + if not db.get_entry(table, key): + return False + return True -def re_match(value, param_name, regexp): - """ Regexp validation of given parameter +# +# PBH validators ------------------------------------------------------------------------------------------------------ +# + +def table_name_validator(ctx, db, table_name, is_exist=True): + if is_exist: + if not is_exist_in_db(db, str(PBH_TABLE_CDB), str(table_name)): + raise click.UsageError( + "Invalid value for \"TABLE_NAME\": {} is not a valid PBH table".format(table_name), ctx + ) + else: + if is_exist_in_db(db, str(PBH_TABLE_CDB), str(table_name)): + raise click.UsageError( + "Invalid value for \"TABLE_NAME\": {} is a valid PBH table".format(table_name), ctx + ) + + +def rule_name_validator(ctx, db, table_name, rule_name, is_exist=True): + if is_exist: + if not is_exist_in_db(db, str(PBH_RULE_CDB), (str(table_name), str(rule_name))): + raise click.UsageError( + "Invalid value for \"RULE_NAME\": {} is not a valid PBH rule".format(rule_name), ctx + ) + else: + if is_exist_in_db(db, str(PBH_RULE_CDB), (str(table_name), str(rule_name))): + raise click.UsageError( + "Invalid value for \"RULE_NAME\": {} is a valid PBH rule".format(rule_name), ctx + ) + + +def hash_name_validator(ctx, db, hash_name, is_exist=True): + if is_exist: + if not is_exist_in_db(db, str(PBH_HASH_CDB), str(hash_name)): + raise click.UsageError( + "Invalid value for \"HASH_NAME\": {} is not a valid PBH hash".format(hash_name), ctx + ) + else: + if is_exist_in_db(db, str(PBH_HASH_CDB), str(hash_name)): + raise click.UsageError( + "Invalid value for \"HASH_NAME\": {} is a valid PBH hash".format(hash_name), ctx + ) + + +def hash_field_name_validator(ctx, db, hash_field_name, is_exist=True): + if is_exist: + if not is_exist_in_db(db, str(PBH_HASH_FIELD_CDB), str(hash_field_name)): + raise click.UsageError( + "Invalid value for \"HASH_FIELD_NAME\": {} is not a valid PBH hash field".format(hash_field_name), ctx + ) + else: + if is_exist_in_db(db, str(PBH_HASH_FIELD_CDB), str(hash_field_name)): + raise click.UsageError( + "Invalid value for \"HASH_FIELD_NAME\": {} is a valid PBH hash field".format(hash_field_name), ctx + ) + + +def interface_list_validator(ctx, db, interface_list): + for intf in interface_list.split(','): + if not (clicommon.is_valid_port(db, str(intf)) or clicommon.is_valid_portchannel(db, str(intf))): + raise click.UsageError( + "Invalid value for \"--interface-list\": {} is not a valid interface".format(intf), ctx + ) + + +def hash_field_list_validator(ctx, db, hash_field_list): + for hfield in hash_field_list.split(','): + if not 
is_exist_in_db(db, str(PBH_HASH_FIELD_CDB), str(hfield)): + raise click.UsageError( + "Invalid value for \"--hash-field-list\": {} is not a valid PBH hash field".format(hfield), ctx + ) + + +def hash_validator(ctx, db, hash): + if not is_exist_in_db(db, str(PBH_HASH_CDB), str(hash)): + raise click.UsageError( + "Invalid value for \"--hash\": {} is not a valid PBH hash".format(hash), ctx + ) + + +def re_match(ctx, param, value, regexp): + """ Regexp validation of given PBH rule parameter Args: - value: value to validate, - param_name: parameter name, + ctx: click context + param: click parameter context + value: value to validate regexp: regular expression Return: @@ -125,17 +289,19 @@ def re_match(value, param_name, regexp): """ if re.match(regexp, str(value)) is None: - exit_with_error("Error: invalid value '{}' for '{}' option".format(str(value), param_name), fg="red") + raise click.UsageError( + "Invalid value for {}: {} is ill-formed".format(param.get_error_hint(ctx), value), ctx + ) return value -def pbh_re_match_validator(ctx, param, value): +def match_validator(ctx, param, value): """ Check if PBH rule options are valid Args: - ctx: click context, - param: click parameter context, + ctx: click context + param: click parameter context value: value of parameter Returns: @@ -143,133 +309,242 @@ def pbh_re_match_validator(ctx, param, value): """ if value is not None: - if param.name == 'gre_key': - return re_match(value, param.name, gre_key_re) - elif param.name == 'ip_protocol': - return re_match(value, param.name, ip_protocol_re) - elif param.name == 'ipv6_next_header': - return re_match(value, param.name, ipv6_next_header_re) - elif param.name == 'l4_dst_port': - return re_match(value, param.name, l4_dst_port_re) - elif param.name == 'inner_ether_type': - return re_match(value, param.name, inner_ether_type_re) - elif param.name == 'ether_type': - return re_match(value, param.name, ether_type_re) - - -def is_exist_in_db(db, obj_list, conf_db_key): - """ Check if provided CLI option already exist in Config DB, - i.g in case of --hash-field-list option it will check - if 'hash-field' was previously added by - 'config pbh hash-field ...' 
CLI command + if param.name == PBH_RULE_GRE_KEY: + return re_match(ctx, param, value, GRE_KEY_RE) + elif param.name == PBH_RULE_ETHER_TYPE: + return re_match(ctx, param, value, ETHER_TYPE_RE) + elif param.name == PBH_RULE_IP_PROTOCOL: + return re_match(ctx, param, value, IP_PROTOCOL_RE) + elif param.name == PBH_RULE_IPV6_NEXT_HEADER: + return re_match(ctx, param, value, IPV6_NEXT_HEADER_RE) + elif param.name == PBH_RULE_L4_DST_PORT: + return re_match(ctx, param, value, L4_DST_PORT_RE) + elif param.name == PBH_RULE_INNER_ETHER_TYPE: + return re_match(ctx, param, value, INNER_ETHER_TYPE_RE) + + +def ip_mask_validator(ctx, param, value): + """ Check if PBH hash field IP mask option is valid Args: - db: reference to Config DB, - obj_list: value of 'click' option - conf_db_key: key to search in Config DB - """ - - if obj_list is None: - return True - - table = db.cfgdb.get_table(conf_db_key) - correct_list = list(table.keys()) + ctx: click context + param: click parameter context + value: value of parameter - splited_list = obj_list.split(',') + Returns: + str: validated parameter + """ - for elem in splited_list: - if elem not in correct_list: - return False + if value is not None: + try: + ip = ipaddress.ip_address(value) + except Exception as err: + raise click.UsageError("Invalid value for {}: {}".format(param.get_error_hint(ctx), err), ctx) - return True + return str(ip) -def ip_mask_hash_field_correspondence_validator(ip_mask, hash_field): - """ Check if the --ip-mask option are correspond to - the --hash-field option +def hash_field_to_ip_mask_correspondence_validator(ctx, hash_field, ip_mask): + """ Function to validate whether --hash-field value + corresponds to the --ip-mask value Args: - ip_mask: ip address or None, - hash_field: hash field value, which was configured before + ctx: click context + hash_field: native hash field value + ip_mask: ip address or None """ - hf_v4 = ['INNER_DST_IPV4', 'INNER_SRC_IPV4'] - hf_v6 = ['INNER_DST_IPV6', 'INNER_SRC_IPV6'] - hf_v4_and_v6 = hf_v4 + hf_v6 - hf_no_ip = ['INNER_IP_PROTOCOL', 'INNER_L4_DST_PORT', 'INNER_L4_SRC_PORT'] + hf_no_ip = ["INNER_IP_PROTOCOL", "INNER_L4_DST_PORT", "INNER_L4_SRC_PORT"] - if (hash_field in hf_no_ip) and (ip_mask): - exit_with_error("Error: the value of '--hash-field'='{}' is NOT compatible with the value of '--ip-mask'='{}'".format(hash_field, ip_mask), fg='red') + if ip_mask is None: + if hash_field not in hf_no_ip: + raise click.UsageError( + "Invalid value for \"--hash-field\": invalid choice: {}. (choose from {} when no \"--ip-mask\" is provided)".format( + hash_field, ", ".join(hf_no_ip) + ), ctx + ) + return + + hf_v4 = ["INNER_DST_IPV4", "INNER_SRC_IPV4"] + hf_v6 = ["INNER_DST_IPV6", "INNER_SRC_IPV6"] - if (hash_field in hf_v4_and_v6) and (ip_mask is None): - exit_with_error("Error: the value of '--hash-field'='{}' is NOT compatible with the value of '--ip-mask'='{}'".format(hash_field, ip_mask), fg='red') + if not ((hash_field in hf_v4) or (hash_field in hf_v6)): + raise click.UsageError( + "Invalid value for \"--hash-field\": invalid choice: {}. 
(choose from {} when \"--ip-mask\" is provided)".format( + hash_field, ", ".join(hf_v4 + hf_v6) + ), ctx + ) - if (ip_mask is not None): - ip_addr_version = ipaddress.ip_address(ip_mask).version + ip_ver = ipaddress.ip_address(ip_mask).version - if (hash_field in hf_v4) and (ip_addr_version != 4): - exit_with_error("Error: the value of '--hash-field'='{}' is NOT compatible with the value of '--ip-mask'='{}'".format(hash_field, ip_mask), fg='red') + if (hash_field in hf_v4) and (ip_ver != 4): + raise click.UsageError( + "Invalid value for \"--ip-mask\": {} is not compatible with {}".format( + ip_mask, hash_field + ), ctx + ) - if (hash_field in hf_v6) and (ip_addr_version != 6): - exit_with_error("Error: the value of '--hash-field'='{}' is NOT compatible with the value of '--ip-mask'='{}'".format(hash_field, ip_mask), fg='red') + if (hash_field in hf_v6) and (ip_ver != 6): + raise click.UsageError( + "Invalid value for \"--ip-mask\": {} is not compatible with {}".format( + ip_mask, hash_field + ), ctx + ) -def ip_mask_hash_field_update_validator(db, hash_field_name, ip_mask, hash_field): - """ Function to validate --ip-mask and --hash-field - correspondence, during update flow +def hash_field_to_ip_mask_validator(ctx, db, hash_field_name, hash_field, ip_mask, is_update=True): + """ Function to validate --hash-field and --ip-mask + correspondence, during add/update flow Args: - db: reference to CONFIG DB, - hash_field_name: name of the hash-field, - ip_mask: ip address, + ctx: click context + db: reference to Config DB + hash_field_name: name of the hash-field hash_field: native hash field value + ip_mask: ip address + is_update: update flow flag """ - if (ip_mask is None) and (hash_field is None): + if not is_update: + hash_field_to_ip_mask_correspondence_validator(ctx, hash_field, ip_mask) + return + + if (hash_field is None) and (ip_mask is None): return - table = db.cfgdb.get_table(pbh_hash_field_tbl_name) - hash_field_obj = table[hash_field_name] + if (hash_field is not None) and (ip_mask is not None): + hash_field_to_ip_mask_correspondence_validator(ctx, hash_field, ip_mask) + return - if (ip_mask is None) and (hash_field is not None): + hf_obj = db.get_entry(str(PBH_HASH_FIELD_CDB), str(hash_field_name)) + if not hf_obj: + raise click.ClickException( + "Failed to validate \"--hash-field\" and \"--ip-mask\" correspondence: {} is not a valid PBH hash field".format( + hash_field_name + ) + ) + + if hash_field is None: + if PBH_HASH_FIELD_HASH_FIELD not in hf_obj: + raise click.ClickException( + "Failed to validate \"--hash-field\" and \"--ip-mask\" correspondence: {} is not a valid PBH field".format( + PBH_HASH_FIELD_HASH_FIELD + ) + ) + hash_field_to_ip_mask_correspondence_validator(ctx, hf_obj[PBH_HASH_FIELD_HASH_FIELD], ip_mask) + else: + if PBH_HASH_FIELD_IP_MASK in hf_obj: + hash_field_to_ip_mask_correspondence_validator(ctx, hash_field, hf_obj[PBH_HASH_FIELD_IP_MASK]) + else: + hash_field_to_ip_mask_correspondence_validator(ctx, hash_field, ip_mask) + +# +# PBH helpers --------------------------------------------------------------------------------------------------------- +# + +def serialize_pbh_counters(obj): + """ Helper that performs PBH counters serialization. + + in = { + ('pbh_table1', 'pbh_rule1'): {'SAI_ACL_COUNTER_ATTR_BYTES': '0', 'SAI_ACL_COUNTER_ATTR_PACKETS': '0'}, + ... 
+ ('pbh_tableN', 'pbh_ruleN'): {'SAI_ACL_COUNTER_ATTR_BYTES': '0', 'SAI_ACL_COUNTER_ATTR_PACKETS': '0'} + } + + out = [ + { + "key": ["pbh_table1", "pbh_rule1"], + "value": {"SAI_ACL_COUNTER_ATTR_BYTES": "0", "SAI_ACL_COUNTER_ATTR_PACKETS": "0"} + }, + ... + { + "key": ["pbh_tableN", "pbh_ruleN"], + "value": {"SAI_ACL_COUNTER_ATTR_BYTES": "0", "SAI_ACL_COUNTER_ATTR_PACKETS": "0"} + } + ] - try: - ip_mask = hash_field_obj['ip_mask'] - except Exception as e: - ip_mask = None + Args: + obj: counters dict. + """ + + def remap_keys(obj): + return [{'key': k, 'value': v} for k, v in obj.items()] - ip_mask_hash_field_correspondence_validator(ip_mask, hash_field) + try: + with open(PBH_COUNTERS_LOCATION, 'w') as f: + json.dump(remap_keys(obj), f) + except IOError as err: + pass - if (ip_mask is not None) and (hash_field is None): - hash_field = hash_field_obj['hash_field'] +def update_pbh_counters(table_name, rule_name): + """ Helper that performs PBH counters update """ + pbh_counters = deserialize_pbh_counters() + key_to_del = table_name, rule_name - ip_mask_hash_field_correspondence_validator(ip_mask, hash_field) + if key_to_del in pbh_counters: + del pbh_counters[key_to_del] + serialize_pbh_counters(pbh_counters) -def interfaces_list_validator(db, interface_list, is_update): - if is_update and (interface_list is None): - return +def pbh_capabilities_query(db, key): + """ Query PBH capabilities """ - is_error = False - interfaces_splited = interface_list.split(',') - - for intf in interfaces_splited: - if intf.startswith('Ethernet'): - if not clicommon.is_valid_port(db.cfgdb, intf): - is_error = True - break - elif intf.startswith('PortChannel'): - if not clicommon.is_valid_portchannel(db.cfgdb, intf): - is_error = True - break - else: - is_error = True - break + sdb_id = db.STATE_DB + sdb_sep = db.get_db_separator(sdb_id) + + cap_map = db.get_all(sdb_id, "{}{}{}".format(str(PBH_CAPABILITIES_SDB), sdb_sep, str(key))) + if not cap_map: + return None + + return cap_map + + +def pbh_match_count(db, table, key, data): + """ Count PBH rule match fields """ - if is_error: - exit_with_error("Error: invalid value '{}', for '--interface-list' option".format(interface_list), fg="red") + field_map = db.get_entry(table, key) + match_total = 0 + match_count = 0 + + if PBH_RULE_GRE_KEY in field_map: + if PBH_RULE_GRE_KEY in data: + match_count += 1 + match_total += 1 + if PBH_RULE_ETHER_TYPE in field_map: + if PBH_RULE_ETHER_TYPE in data: + match_count += 1 + match_total += 1 + if PBH_RULE_IP_PROTOCOL in field_map: + if PBH_RULE_IP_PROTOCOL in data: + match_count += 1 + match_total += 1 + if PBH_RULE_IPV6_NEXT_HEADER in field_map: + if PBH_RULE_IPV6_NEXT_HEADER in data: + match_count += 1 + match_total += 1 + if PBH_RULE_L4_DST_PORT in field_map: + if PBH_RULE_L4_DST_PORT in data: + match_count += 1 + match_total += 1 + if PBH_RULE_INNER_ETHER_TYPE in field_map: + if PBH_RULE_INNER_ETHER_TYPE in data: + match_count += 1 + match_total += 1 + + return match_total, match_count + + +def exit_with_error(*args, **kwargs): + """ Print a message and abort CLI """ + + click.secho(*args, **kwargs) + raise click.Abort() + +# +# PBH CLI ------------------------------------------------------------------------------------------------------------- +# @click.group( name='pbh', @@ -280,6 +555,9 @@ def PBH(): pass +# +# PBH hash field ------------------------------------------------------------------------------------------------------ +# @PBH.group( name="hash-field", @@ -295,43 +573,50 @@ def PBH_HASH_FIELD(): 
@click.argument( "hash-field-name", nargs=1, - required=True, + required=True ) @click.option( "--hash-field", help="Configures native hash field for this hash field", required=True, - type=click.Choice(hash_field_types) + type=click.Choice(HASH_FIELD_VALUE_LIST) ) @click.option( "--ip-mask", - help="""Configures IPv4/IPv6 address mask for this hash field, required when the value of --hash-field is - INNER_DST_IPV4 or INNER_SRC_IPV4 or INNER_SRC_IPV6 or INNER_SRC_IPV6""", - callback=ip_address_validator, + help="""Configures IPv4/IPv6 address mask for this hash field, required when the value of --hash-field is - INNER_DST_IPV4 or INNER_SRC_IPV4 or INNER_DST_IPV6 or INNER_SRC_IPV6""", + callback=ip_mask_validator ) @click.option( "--sequence-id", help="Configures in which order the fields are hashed and defines which fields should be associative", required=True, - type=click.INT, + type=click.INT ) @clicommon.pass_db def PBH_HASH_FIELD_add(db, hash_field_name, hash_field, ip_mask, sequence_id): """ Add object to PBH_HASH_FIELD table """ - ip_mask_hash_field_correspondence_validator(ip_mask, hash_field) + ctx = click.get_current_context() + + hash_field_name_validator(ctx, db.cfgdb_pipe, hash_field_name, False) + hash_field_to_ip_mask_validator(ctx, db.cfgdb_pipe, hash_field_name, hash_field, ip_mask, False) - table = pbh_hash_field_tbl_name - key = hash_field_name + table = str(PBH_HASH_FIELD_CDB) + key = str(hash_field_name) data = {} + if hash_field is not None: - data["hash_field"] = hash_field + data[PBH_HASH_FIELD_HASH_FIELD] = hash_field if ip_mask is not None: - data["ip_mask"] = ip_mask + data[PBH_HASH_FIELD_IP_MASK] = ip_mask if sequence_id is not None: - data["sequence_id"] = sequence_id + data[PBH_HASH_FIELD_SEQUENCE_ID] = sequence_id + + if not data: + exit_with_error("Error: Failed to add PBH hash field: options are not provided", fg="red") try: - add_entry(db.cfgdb, table, key, data) + add_entry(db.cfgdb_pipe, table, key, data) except Exception as err: exit_with_error("Error: {}".format(err), fg="red") @@ -340,41 +625,52 @@ def PBH_HASH_FIELD_add(db, hash_field_name, hash_field, ip_mask, sequence_id): @click.argument( "hash-field-name", nargs=1, - required=True, + required=True ) @click.option( "--hash-field", help="Configures native hash field for this hash field", - type=click.Choice(hash_field_types) + type=click.Choice(HASH_FIELD_VALUE_LIST) ) @click.option( "--ip-mask", - help="""Configures IPv4/IPv6 address mask for this hash field, required when the value of --hash-field is - INNER_DST_IPV4 or INNER_SRC_IPV4 or INNER_SRC_IPV6 or INNER_SRC_IPV6 """, - callback=ip_address_validator, + help="""Configures IPv4/IPv6 address mask for this hash field, required when the value of --hash-field is - INNER_DST_IPV4 or INNER_SRC_IPV4 or INNER_DST_IPV6 or INNER_SRC_IPV6 """, + callback=ip_mask_validator ) @click.option( "--sequence-id", help="Configures in which order the fields are hashed and defines which fields should be associative", - type=click.INT, + type=click.INT ) @clicommon.pass_db def PBH_HASH_FIELD_update(db, hash_field_name, hash_field, ip_mask, sequence_id): """ Update object in PBH_HASH_FIELD table """ - ip_mask_hash_field_update_validator(db, hash_field_name, ip_mask, hash_field) + ctx = click.get_current_context() - table = pbh_hash_field_tbl_name - key = hash_field_name + hash_field_name_validator(ctx, db.cfgdb_pipe, hash_field_name) + hash_field_to_ip_mask_validator(ctx, db.cfgdb_pipe, hash_field_name, hash_field, ip_mask) + + table = str(PBH_HASH_FIELD_CDB) + key 
= str(hash_field_name) data = {} + if hash_field is not None: - data["hash_field"] = hash_field + data[PBH_HASH_FIELD_HASH_FIELD] = hash_field if ip_mask is not None: - data["ip_mask"] = ip_mask + data[PBH_HASH_FIELD_IP_MASK] = ip_mask if sequence_id is not None: - data["sequence_id"] = sequence_id + data[PBH_HASH_FIELD_SEQUENCE_ID] = sequence_id + + if not data: + exit_with_error("Error: Failed to update PBH hash field: options are not provided", fg="red") + + cap = pbh_capabilities_query(db.db, PBH_HASH_FIELD_CAPABILITIES_KEY) + if cap is None: + exit_with_error("Error: Failed to query PBH hash field capabilities: configuration is not available", fg="red") try: - update_entry(db.cfgdb, table, key, data) + update_entry(db.cfgdb_pipe, cap, table, key, data) except Exception as err: exit_with_error("Error: {}".format(err), fg="red") @@ -383,19 +679,27 @@ def PBH_HASH_FIELD_update(db, hash_field_name, hash_field, ip_mask, sequence_id) @click.argument( "hash-field-name", nargs=1, - required=True, + required=True ) @clicommon.pass_db def PBH_HASH_FIELD_delete(db, hash_field_name): """ Delete object from PBH_HASH_FIELD table """ - table = pbh_hash_field_tbl_name - key = hash_field_name + ctx = click.get_current_context() + + hash_field_name_validator(ctx, db.cfgdb_pipe, hash_field_name) + + table = str(PBH_HASH_FIELD_CDB) + key = str(hash_field_name) + try: - del_entry(db.cfgdb, table, key) + del_entry(db.cfgdb_pipe, table, key) except Exception as err: exit_with_error("Error: {}".format(err), fg="red") +# +# PBH hash ------------------------------------------------------------------------------------------------------------ +# @PBH.group( name="hash", @@ -411,28 +715,34 @@ def PBH_HASH(): @click.argument( "hash-name", nargs=1, - required=True, + required=True ) @click.option( "--hash-field-list", help="The list of hash fields to apply with this hash", - required=True, + required=True ) @clicommon.pass_db def PBH_HASH_add(db, hash_name, hash_field_list): """ Add object to PBH_HASH table """ - if not is_exist_in_db(db, hash_field_list, pbh_hash_field_tbl_name): - exit_with_error("Error: invalid value '{}' for '--hash-field-list' option".format(hash_field_list), fg="red") + ctx = click.get_current_context() + + hash_name_validator(ctx, db.cfgdb_pipe, hash_name, False) - table = pbh_hash_tbl_name - key = hash_name + table = str(PBH_HASH_CDB) + key = str(hash_name) data = {} + if hash_field_list is not None: - data["hash_field_list"] = hash_field_list.split(",") + hash_field_list_validator(ctx, db.cfgdb_pipe, hash_field_list) + data[PBH_HASH_HASH_FIELD_LIST] = hash_field_list.split(",") + + if not data: + exit_with_error("Error: Failed to add PBH hash: options are not provided", fg="red") try: - add_entry(db.cfgdb, table, key, data) + add_entry(db.cfgdb_pipe, table, key, data) except Exception as err: exit_with_error("Error: {}".format(err), fg="red") @@ -441,27 +751,37 @@ def PBH_HASH_add(db, hash_name, hash_field_list): @click.argument( "hash-name", nargs=1, - required=True, + required=True ) @click.option( "--hash-field-list", - help="The list of hash fields to apply with this hash", + help="The list of hash fields to apply with this hash" ) @clicommon.pass_db def PBH_HASH_update(db, hash_name, hash_field_list): """ Update object in PBH_HASH table """ - if not is_exist_in_db(db, hash_field_list, pbh_hash_field_tbl_name): - exit_with_error("Error: invalid value '{}' for '--hash-field-list' option".format(hash_field_list), fg="red") + ctx = click.get_current_context() + + hash_name_validator(ctx, 
db.cfgdb_pipe, hash_name) - table = pbh_hash_tbl_name - key = hash_name + table = str(PBH_HASH_CDB) + key = str(hash_name) data = {} + if hash_field_list is not None: - data["hash_field_list"] = hash_field_list.split(",") + hash_field_list_validator(ctx, db.cfgdb_pipe, hash_field_list) + data[PBH_HASH_HASH_FIELD_LIST] = hash_field_list.split(",") + + if not data: + exit_with_error("Error: Failed to update PBH hash: options are not provided", fg="red") + + cap = pbh_capabilities_query(db.db, PBH_HASH_CAPABILITIES_KEY) + if cap is None: + exit_with_error("Error: Failed to query PBH hash capabilities: configuration is not available", fg="red") try: - update_entry(db.cfgdb, table, key, data) + update_entry(db.cfgdb_pipe, cap, table, key, data) except Exception as err: exit_with_error("Error: {}".format(err), fg="red") @@ -470,19 +790,27 @@ def PBH_HASH_update(db, hash_name, hash_field_list): @click.argument( "hash-name", nargs=1, - required=True, + required=True ) @clicommon.pass_db def PBH_HASH_delete(db, hash_name): """ Delete object from PBH_HASH table """ - table = pbh_hash_tbl_name - key = hash_name + ctx = click.get_current_context() + + hash_name_validator(ctx, db.cfgdb_pipe, hash_name) + + table = str(PBH_HASH_CDB) + key = str(hash_name) + try: - del_entry(db.cfgdb, table, key) + del_entry(db.cfgdb_pipe, table, key) except Exception as err: exit_with_error("Error: {}".format(err), fg="red") +# +# PBH rule ------------------------------------------------------------------------------------------------------------ +# @PBH.group( name="rule", @@ -498,63 +826,63 @@ def PBH_RULE(): @click.argument( "table-name", nargs=1, - required=True, + required=True ) @click.argument( "rule-name", nargs=1, - required=True, + required=True ) @click.option( "--priority", help="Configures priority for this rule", required=True, - type=click.INT, + type=click.INT ) @click.option( "--gre-key", - help="Configures packet match: GRE key (value/mask)", - callback=pbh_re_match_validator, + help="Configures packet match for this rule: GRE key (value/mask)", + callback=match_validator ) @click.option( "--ether-type", help="Configures packet match for this rule: EtherType (IANA Ethertypes)", - callback=pbh_re_match_validator, + callback=match_validator ) @click.option( "--ip-protocol", help="Configures packet match for this rule: IP protocol (IANA Protocol Numbers)", - callback=pbh_re_match_validator, + callback=match_validator ) @click.option( "--ipv6-next-header", help="Configures packet match for this rule: IPv6 Next header (IANA Protocol Numbers)", - callback=pbh_re_match_validator, + callback=match_validator ) @click.option( "--l4-dst-port", help="Configures packet match for this rule: L4 destination port", - callback=pbh_re_match_validator, + callback=match_validator ) @click.option( "--inner-ether-type", help="Configures packet match for this rule: inner EtherType (IANA Ethertypes)", - callback=pbh_re_match_validator, + callback=match_validator ) @click.option( "--hash", - required=True, help="The hash to apply with this rule", + required=True ) @click.option( "--packet-action", help="Configures packet action for this rule", - type=click.Choice(packet_action_types) + type=click.Choice(PACKET_ACTION_VALUE_LIST) ) @click.option( "--flow-counter", - help="Enables/Disables packet/byte counter", - type=click.Choice(flow_counter_state) + help="Enables/Disables packet/byte counter for this rule", + type=click.Choice(FLOW_COUNTER_VALUE_LIST) ) @clicommon.pass_db def PBH_RULE_add( @@ -574,103 +902,139 @@ def 
PBH_RULE_add( ): """ Add object to PBH_RULE table """ - if not is_exist_in_db(db, table_name, pbh_table_tbl_name): - exit_with_error("Error: invalid value '{}' for 'table-name' argument".format(table_name), fg="red") - if not is_exist_in_db(db, hash, pbh_hash_tbl_name): - exit_with_error("Error: invalid value '{}' for '--hash' option".format(hash), fg="red") + ctx = click.get_current_context() + + table_name_validator(ctx, db.cfgdb_pipe, table_name) + rule_name_validator(ctx, db.cfgdb_pipe, table_name, rule_name, False) - table = "PBH_RULE" - key = table_name, rule_name + table = str(PBH_RULE_CDB) + key = (str(table_name), str(rule_name)) data = {} + + match_count = 0 + if priority is not None: - data["priority"] = priority + data[PBH_RULE_PRIORITY] = priority if gre_key is not None: - data["gre_key"] = gre_key + data[PBH_RULE_GRE_KEY] = gre_key + match_count += 1 if ether_type is not None: - data["ether_type"] = ether_type + data[PBH_RULE_ETHER_TYPE] = ether_type + match_count += 1 if ip_protocol is not None: - data["ip_protocol"] = ip_protocol + data[PBH_RULE_IP_PROTOCOL] = ip_protocol + match_count += 1 if ipv6_next_header is not None: - data["ipv6_next_header"] = ipv6_next_header + data[PBH_RULE_IPV6_NEXT_HEADER] = ipv6_next_header + match_count += 1 if l4_dst_port is not None: - data["l4_dst_port"] = l4_dst_port + data[PBH_RULE_L4_DST_PORT] = l4_dst_port + match_count += 1 if inner_ether_type is not None: - data["inner_ether_type"] = inner_ether_type + data[PBH_RULE_INNER_ETHER_TYPE] = inner_ether_type + match_count += 1 if hash is not None: - data["hash"] = hash + hash_validator(ctx, db.cfgdb_pipe, hash) + data[PBH_RULE_HASH] = hash if packet_action is not None: - data["packet_action"] = packet_action + data[PBH_RULE_PACKET_ACTION] = packet_action if flow_counter is not None: - data["flow_counter"] = flow_counter + data[PBH_RULE_FLOW_COUNTER] = flow_counter + + if not data: + exit_with_error("Error: Failed to add PBH rule: options are not provided", fg="red") + + if match_count == 0: + exit_with_error("Error: Failed to add PBH rule: match options are not provided", fg="red") try: - add_entry(db.cfgdb, table, key, data) + add_entry(db.cfgdb_pipe, table, key, data) except Exception as err: exit_with_error("Error: {}".format(err), fg="red") -@PBH_RULE.command(name="update") +@PBH_RULE.group( + name="update", + cls=clicommon.AliasedGroup +) +def PBH_RULE_update(): + """ Update object in PBH_RULE table """ + + pass + + +@PBH_RULE_update.group( + name="field", + cls=clicommon.AliasedGroup +) +def PBH_RULE_update_field(): + """ Update object field in PBH_RULE table """ + + pass + + +@PBH_RULE_update_field.command(name="set") @click.argument( "table-name", nargs=1, - required=True, + required=True ) @click.argument( "rule-name", nargs=1, - required=True, + required=True ) @click.option( "--priority", help="Configures priority for this rule", - type=click.INT, + type=click.INT ) @click.option( "--gre-key", - help="Configures packet match: GRE key (value/mask)", - callback=pbh_re_match_validator, + help="Configures packet match for this rule: GRE key (value/mask)", + callback=match_validator ) @click.option( "--ether-type", help="Configures packet match for this rule: EtherType (IANA Ethertypes)", - callback=pbh_re_match_validator, + callback=match_validator ) @click.option( "--ip-protocol", help="Configures packet match for this rule: IP protocol (IANA Protocol Numbers)", - callback=pbh_re_match_validator, + callback=match_validator ) @click.option( "--ipv6-next-header", help="Configures 
packet match for this rule: IPv6 Next header (IANA Protocol Numbers)", - callback=pbh_re_match_validator, + callback=match_validator ) @click.option( "--l4-dst-port", help="Configures packet match for this rule: L4 destination port", - callback=pbh_re_match_validator, + callback=match_validator ) @click.option( "--inner-ether-type", help="Configures packet match for this rule: inner EtherType (IANA Ethertypes)", - callback=pbh_re_match_validator, + callback=match_validator ) @click.option( "--hash", - help="The hash to apply with this rule", + help="The hash to apply with this rule" ) @click.option( "--packet-action", help="Configures packet action for this rule", - type=click.Choice(packet_action_types) + type=click.Choice(PACKET_ACTION_VALUE_LIST) ) @click.option( "--flow-counter", - help="Enables/Disables packet/byte counter", - type=click.Choice(flow_counter_state) + help="Enables/Disables packet/byte counter for this rule", + type=click.Choice(FLOW_COUNTER_VALUE_LIST) ) @clicommon.pass_db -def PBH_RULE_update( +def PBH_RULE_update_field_set( db, table_name, rule_name, @@ -685,39 +1049,178 @@ def PBH_RULE_update( packet_action, flow_counter ): - """ Update object in PBH_RULE table """ + """ Set object field in PBH_RULE table """ - if not is_exist_in_db(db, table_name, pbh_table_tbl_name): - exit_with_error("Error: invalid value '{}' for 'table-name' argument".format(table_name), fg="red") - if not is_exist_in_db(db, hash, pbh_hash_tbl_name): - exit_with_error("Error: invalid value '{}' for '--hash' option".format(hash), fg="red") + ctx = click.get_current_context() - table = "PBH_RULE" - key = table_name, rule_name + table_name_validator(ctx, db.cfgdb_pipe, table_name) + rule_name_validator(ctx, db.cfgdb_pipe, table_name, rule_name) + + table = str(PBH_RULE_CDB) + key = (str(table_name), str(rule_name)) data = {} + if priority is not None: - data["priority"] = priority + data[PBH_RULE_PRIORITY] = priority if gre_key is not None: - data["gre_key"] = gre_key + data[PBH_RULE_GRE_KEY] = gre_key if ether_type is not None: - data["ether_type"] = ether_type + data[PBH_RULE_ETHER_TYPE] = ether_type if ip_protocol is not None: - data["ip_protocol"] = ip_protocol + data[PBH_RULE_IP_PROTOCOL] = ip_protocol if ipv6_next_header is not None: - data["ipv6_next_header"] = ipv6_next_header + data[PBH_RULE_IPV6_NEXT_HEADER] = ipv6_next_header if l4_dst_port is not None: - data["l4_dst_port"] = l4_dst_port + data[PBH_RULE_L4_DST_PORT] = l4_dst_port if inner_ether_type is not None: - data["inner_ether_type"] = inner_ether_type + data[PBH_RULE_INNER_ETHER_TYPE] = inner_ether_type if hash is not None: - data["hash"] = hash + hash_validator(ctx, db.cfgdb_pipe, hash) + data[PBH_RULE_HASH] = hash if packet_action is not None: - data["packet_action"] = packet_action + data[PBH_RULE_PACKET_ACTION] = packet_action if flow_counter is not None: - data["flow_counter"] = flow_counter + data[PBH_RULE_FLOW_COUNTER] = flow_counter + + if not data: + exit_with_error("Error: Failed to update PBH rule: options are not provided", fg="red") + + cap = pbh_capabilities_query(db.db, PBH_RULE_CAPABILITIES_KEY) + if cap is None: + exit_with_error("Error: Failed to query PBH rule capabilities: configuration is not available", fg="red") try: - update_entry(db.cfgdb, table, key, data) + update_entry(db.cfgdb_pipe, cap, table, key, data) + if data.get(PBH_RULE_FLOW_COUNTER, "") == "DISABLED": + update_pbh_counters(table_name, rule_name) + except Exception as err: + exit_with_error("Error: {}".format(err), fg="red") + + 
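The hunks above split the old flat "pbh rule update" command into a nested "update field set" / "update field del" group. Below is a minimal standalone sketch of that nesting for reference only; it is not code from this patch (it uses plain click groups instead of SONiC's clicommon.AliasedGroup, echo-only handlers, and a single --priority option standing in for the full option set).

import click

@click.group(name="rule")
def rule():
    """ PBH rule commands (sketch) """
    pass

@rule.group(name="update")
def update():
    """ Update object in PBH_RULE table (sketch) """
    pass

@update.group(name="field")
def field():
    """ Update object field in PBH_RULE table (sketch) """
    pass

@field.command(name="set")
@click.argument("table-name", nargs=1, required=True)
@click.argument("rule-name", nargs=1, required=True)
@click.option("--priority", type=click.INT, help="Configures priority for this rule")
def field_set(table_name, rule_name, priority):
    """ Set object field in PBH_RULE table (sketch) """
    click.echo("set {}|{}: priority={}".format(table_name, rule_name, priority))

@field.command(name="del")
@click.argument("table-name", nargs=1, required=True)
@click.argument("rule-name", nargs=1, required=True)
@click.option("--priority", is_flag=True, help="Deletes priority for this rule")
def field_del(table_name, rule_name, priority):
    """ Delete object field from PBH_RULE table (sketch) """
    click.echo("del {}|{}: priority deleted={}".format(table_name, rule_name, priority))

if __name__ == "__main__":
    rule()

Invocation then takes the form "rule update field set TABLE RULE --priority 3" or "rule update field del TABLE RULE --priority", which matches the calls exercised in tests/pbh_test.py later in this patch.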
+@PBH_RULE_update_field.command(name="del") +@click.argument( + "table-name", + nargs=1, + required=True +) +@click.argument( + "rule-name", + nargs=1, + required=True +) +@click.option( + "--priority", + help="Deletes priority for this rule", + is_flag=True +) +@click.option( + "--gre-key", + help="Deletes packet match for this rule: GRE key (value/mask)", + is_flag=True +) +@click.option( + "--ether-type", + help="Deletes packet match for this rule: EtherType (IANA Ethertypes)", + is_flag=True +) +@click.option( + "--ip-protocol", + help="Deletes packet match for this rule: IP protocol (IANA Protocol Numbers)", + is_flag=True +) +@click.option( + "--ipv6-next-header", + help="Deletes packet match for this rule: IPv6 Next header (IANA Protocol Numbers)", + is_flag=True +) +@click.option( + "--l4-dst-port", + help="Deletes packet match for this rule: L4 destination port", + is_flag=True +) +@click.option( + "--inner-ether-type", + help="Deletes packet match for this rule: inner EtherType (IANA Ethertypes)", + is_flag=True +) +@click.option( + "--hash", + help="Deletes hash for this rule", + is_flag=True +) +@click.option( + "--packet-action", + help="Deletes packet action for this rule", + is_flag=True +) +@click.option( + "--flow-counter", + help="Deletes packet/byte counter for this rule", + is_flag=True +) +@clicommon.pass_db +def PBH_RULE_update_field_del( + db, + table_name, + rule_name, + priority, + gre_key, + ether_type, + ip_protocol, + ipv6_next_header, + l4_dst_port, + inner_ether_type, + hash, + packet_action, + flow_counter +): + """ Delete object field from PBH_RULE table """ + + ctx = click.get_current_context() + + table_name_validator(ctx, db.cfgdb_pipe, table_name) + rule_name_validator(ctx, db.cfgdb_pipe, table_name, rule_name) + + table = str(PBH_RULE_CDB) + key = (str(table_name), str(rule_name)) + data = {} + + if priority: + data[PBH_RULE_PRIORITY] = None + if gre_key: + data[PBH_RULE_GRE_KEY] = None + if ether_type: + data[PBH_RULE_ETHER_TYPE] = None + if ip_protocol: + data[PBH_RULE_IP_PROTOCOL] = None + if ipv6_next_header: + data[PBH_RULE_IPV6_NEXT_HEADER] = None + if l4_dst_port: + data[PBH_RULE_L4_DST_PORT] = None + if inner_ether_type: + data[PBH_RULE_INNER_ETHER_TYPE] = None + if hash: + data[PBH_RULE_HASH] = None + if packet_action: + data[PBH_RULE_PACKET_ACTION] = None + if flow_counter: + data[PBH_RULE_FLOW_COUNTER] = None + + if not data: + exit_with_error("Error: Failed to update PBH rule: options are not provided", fg="red") + + match_total, match_count = pbh_match_count(db.cfgdb_pipe, table, key, data) + if match_count >= match_total: + exit_with_error("Error: Failed to update PBH rule: match options are required", fg="red") + + cap = pbh_capabilities_query(db.db, PBH_RULE_CAPABILITIES_KEY) + if cap is None: + exit_with_error("Error: Failed to query PBH rule capabilities: configuration is not available", fg="red") + + try: + update_entry(db.cfgdb_pipe, cap, table, key, data) + if flow_counter: + update_pbh_counters(table_name, rule_name) except Exception as err: exit_with_error("Error: {}".format(err), fg="red") @@ -726,24 +1229,34 @@ def PBH_RULE_update( @click.argument( "table-name", nargs=1, - required=True, + required=True ) @click.argument( "rule-name", nargs=1, - required=True, + required=True ) @clicommon.pass_db def PBH_RULE_delete(db, table_name, rule_name): """ Delete object from PBH_RULE table """ - table = "PBH_RULE" - key = table_name, rule_name + ctx = click.get_current_context() + + table_name_validator(ctx, db.cfgdb_pipe, table_name) + 
rule_name_validator(ctx, db.cfgdb_pipe, table_name, rule_name) + + table = str(PBH_RULE_CDB) + key = (str(table_name), str(rule_name)) + try: - del_entry(db.cfgdb, table, key) + del_entry(db.cfgdb_pipe, table, key) + update_pbh_counters(table_name, rule_name) except Exception as err: exit_with_error("Error: {}".format(err), fg="red") +# +# PBH table ----------------------------------------------------------------------------------------------------------- +# @PBH.group( name="table", @@ -759,34 +1272,41 @@ def PBH_TABLE(): @click.argument( "table-name", nargs=1, - required=True, -) -@click.option( - "--description", - help="The description of this table", - required=True, + required=True ) @click.option( "--interface-list", help="Interfaces to which this table is applied", - required=True, + required=True +) +@click.option( + "--description", + help="The description of this table", + required=True ) @clicommon.pass_db -def PBH_TABLE_add(db, table_name, description, interface_list): +def PBH_TABLE_add(db, table_name, interface_list, description): """ Add object to PBH_TABLE table """ - interfaces_list_validator(db, interface_list, is_update=False) + ctx = click.get_current_context() + + table_name_validator(ctx, db.cfgdb_pipe, table_name, False) - table = "PBH_TABLE" - key = table_name + table = str(PBH_TABLE_CDB) + key = str(table_name) data = {} - if description is not None: - data["description"] = description + if interface_list is not None: - data["interface_list"] = interface_list.split(",") + interface_list_validator(ctx, db.cfgdb_pipe, interface_list) + data[PBH_TABLE_INTERFACE_LIST] = interface_list.split(",") + if description is not None: + data[PBH_TABLE_DESCRIPTION] = description + + if not data: + exit_with_error("Error: Failed to add PBH table: options are not provided", fg="red") try: - add_entry(db.cfgdb, table, key, data) + add_entry(db.cfgdb_pipe, table, key, data) except Exception as err: exit_with_error("Error: {}".format(err), fg="red") @@ -795,32 +1315,43 @@ def PBH_TABLE_add(db, table_name, description, interface_list): @click.argument( "table-name", nargs=1, - required=True, + required=True ) @click.option( - "--description", - help="The description of this table", + "--interface-list", + help="Interfaces to which this table is applied" ) @click.option( - "--interface-list", - help="Interfaces to which this table is applied", + "--description", + help="The description of this table", ) @clicommon.pass_db -def PBH_TABLE_update(db, table_name, description, interface_list): +def PBH_TABLE_update(db, table_name, interface_list, description): """ Update object in PBH_TABLE table """ - interfaces_list_validator(db, interface_list, is_update=True) + ctx = click.get_current_context() - table = "PBH_TABLE" - key = table_name + table_name_validator(ctx, db.cfgdb_pipe, table_name) + + table = str(PBH_TABLE_CDB) + key = str(table_name) data = {} - if description is not None: - data["description"] = description + if interface_list is not None: - data["interface_list"] = interface_list.split(",") + interface_list_validator(ctx, db.cfgdb_pipe, interface_list) + data[PBH_TABLE_INTERFACE_LIST] = interface_list.split(",") + if description is not None: + data[PBH_TABLE_DESCRIPTION] = description + + if not data: + exit_with_error("Error: Failed to update PBH table: options are not provided", fg="red") + + cap = pbh_capabilities_query(db.db, PBH_TABLE_CAPABILITIES_KEY) + if cap is None: + exit_with_error("Error: Failed to query PBH table capabilities: configuration is not available", 
fg="red") try: - update_entry(db.cfgdb, table, key, data) + update_entry(db.cfgdb_pipe, cap, table, key, data) except Exception as err: exit_with_error("Error: {}".format(err), fg="red") @@ -835,17 +1366,24 @@ def PBH_TABLE_update(db, table_name, description, interface_list): def PBH_TABLE_delete(db, table_name): """ Delete object from PBH_TABLE table """ - table = "PBH_TABLE" - key = table_name + ctx = click.get_current_context() + + table_name_validator(ctx, db.cfgdb_pipe, table_name) + + table = str(PBH_TABLE_CDB) + key = str(table_name) + try: - del_entry(db.cfgdb, table, key) + del_entry(db.cfgdb_pipe, table, key) except Exception as err: exit_with_error("Error: {}".format(err), fg="red") +# +# PBH plugin ---------------------------------------------------------------------------------------------------------- +# def register(cli): cli_node = PBH if cli_node.name in cli.commands: raise Exception("{} already exists in CLI".format(cli_node.name)) cli.add_command(PBH) - diff --git a/show/plugins/pbh.py b/show/plugins/pbh.py index 95115d976d..e50f6507a5 100644 --- a/show/plugins/pbh.py +++ b/show/plugins/pbh.py @@ -93,7 +93,7 @@ def PBH_HASH_FIELD(db): body = [] - table = db.cfgdb.get_table(pbh_hash_field_tbl_name) + table = db.cfgdb_pipe.get_table(pbh_hash_field_tbl_name) for key in natsort.natsorted(table): entry = table[key] @@ -158,7 +158,7 @@ def PBH_HASH(db): body = [] - table = db.cfgdb.get_table(pbh_hash_tbl_name) + table = db.cfgdb_pipe.get_table(pbh_hash_tbl_name) for key in natsort.natsorted(table): entry = table[key] if not isinstance(key, tuple): @@ -203,7 +203,7 @@ def PBH_RULE(db): body = [] - table = db.cfgdb.get_table(pbh_rule_tbl_name) + table = db.cfgdb_pipe.get_table(pbh_rule_tbl_name) for key in natsort.natsorted(table): entry = table[key] if not isinstance(key, tuple): @@ -322,7 +322,7 @@ def PBH_TABLE(db): body = [] - table = db.cfgdb.get_table(pbh_table_tbl_name) + table = db.cfgdb_pipe.get_table(pbh_table_tbl_name) for key in natsort.natsorted(table): entry = table[key] if not isinstance(key, tuple): @@ -374,12 +374,12 @@ def PBH_STATISTICS(db): body = [] - pbh_rules = db.cfgdb.get_table(pbh_rule_tbl_name) + pbh_rules = db.cfgdb_pipe.get_table(pbh_rule_tbl_name) pbh_counters = read_pbh_counters(pbh_rules) - saved_pbh_counters = read_saved_pbh_counters() + saved_pbh_counters = deserialize_pbh_counters() for key in pbh_rules: - if pbh_rules[key]['flow_counter'] == 'ENABLED': + if pbh_rules[key].get("flow_counter", "") == "ENABLED": row = [ key[0], key[1], @@ -403,20 +403,43 @@ def get_counter_value(pbh_counters, saved_pbh_counters, key, type): return str(pbh_counters[key][type]) -def remap_keys(obj_list): - res = {} - for e in obj_list: - res[e['key'][0], e['key'][1]] = e['value'] - return res +def deserialize_pbh_counters(): + """ Helper that performs PBH counters deserialization. + in = [ + { + "key": ["pbh_table1", "pbh_rule1"], + "value": {"SAI_ACL_COUNTER_ATTR_BYTES": "0", "SAI_ACL_COUNTER_ATTR_PACKETS": "0"} + }, + ... + { + "key": ["pbh_tableN", "pbh_ruleN"], + "value": {"SAI_ACL_COUNTER_ATTR_BYTES": "0", "SAI_ACL_COUNTER_ATTR_PACKETS": "0"} + } + ] + + out = { + ('pbh_table1', 'pbh_rule1'): {'SAI_ACL_COUNTER_ATTR_BYTES': '0', 'SAI_ACL_COUNTER_ATTR_PACKETS': '0'}, + ... + ('pbh_tableN', 'pbh_ruleN'): {'SAI_ACL_COUNTER_ATTR_BYTES': '0', 'SAI_ACL_COUNTER_ATTR_PACKETS': '0'} + } + + Returns: + obj: counters dict. 
+ """ + + def remap_keys(obj): + res = {} + for e in obj: + res[e['key'][0], e['key'][1]] = e['value'] + return res -def read_saved_pbh_counters(): if os.path.isfile(PBH_COUNTERS_LOCATION): try: - with open(PBH_COUNTERS_LOCATION) as fp: - return remap_keys(json.load(fp)) - except Exception: - return {} + with open(PBH_COUNTERS_LOCATION, 'r') as f: + return remap_keys(json.load(f)) + except Exception as err: + pass return {} @@ -484,4 +507,3 @@ def register(cli): if cli_node.name in cli.commands: raise Exception(f"{cli_node.name} already exists in CLI") cli.add_command(PBH) - diff --git a/tests/mock_tables/dbconnector.py b/tests/mock_tables/dbconnector.py index 30a9f5a653..80d74cafd7 100644 --- a/tests/mock_tables/dbconnector.py +++ b/tests/mock_tables/dbconnector.py @@ -2,6 +2,7 @@ import json import os import sys +import re from unittest import mock import mockredis @@ -119,6 +120,41 @@ def __init__(self, *args, **kwargs): for attr, value in v.items(): self.hset(k, attr, value) + # Patch mockredis/mockredis/client.py + # The offical implementation assume decode_responses=False + def _common_scan(self, values_function, cursor='0', match=None, count=10, key=None): + """ + Common scanning skeleton. + + :param key: optional function used to identify what 'match' is applied to + """ + if count is None: + count = 10 + cursor = int(cursor) + count = int(count) + if not count: + raise ValueError('if specified, count must be > 0: %s' % count) + + values = values_function() + if cursor + count >= len(values): + # we reached the end, back to zero + result_cursor = 0 + else: + result_cursor = cursor + count + + values = values[cursor:cursor+count] + + if match is not None: + if self.decode_responses: + regex = re.compile('^' + re.escape(self._encode(match)).replace('\\*', '.*') + '$') + else: + regex = re.compile(b'^' + re.escape(self._encode(match)).replace(b'\\*', b'.*') + b'$') + if not key: + key = lambda v: v + values = [v for v in values if regex.match(key(v))] + + return [result_cursor, values] + # Patch mockredis/mockredis/client.py # The offical implementation assume decode_responses=False # Here we detect the option and decode after doing encode @@ -128,7 +164,7 @@ def _encode(self, value): value = super(SwssSyncClient, self)._encode(value) if self.decode_responses: - return value.decode('utf-8') + return value.decode('utf-8') # Patch mockredis/mockredis/client.py # The official implementation will filter out keys with a slash '/' diff --git a/tests/pbh_input/assert_show_output.py b/tests/pbh_input/assert_show_output.py index 5b67403a17..7a701ba4bc 100644 --- a/tests/pbh_input/assert_show_output.py +++ b/tests/pbh_input/assert_show_output.py @@ -77,6 +77,7 @@ pbh_table2 vxlan 300 400 """ + show_pbh_statistics_updated="""\ TABLE RULE RX PACKETS COUNT RX BYTES COUNT ---------- ------ ------------------ ---------------- @@ -84,9 +85,25 @@ pbh_table2 vxlan 400 400 """ + show_pbh_statistics_after_disabling_rule="""\ TABLE RULE RX PACKETS COUNT RX BYTES COUNT ---------- ------ ------------------ ---------------- pbh_table1 nvgre 0 0 """ + +show_pbh_statistics_after_toggling_counter="""\ +TABLE RULE RX PACKETS COUNT RX BYTES COUNT +---------- ------ ------------------ ---------------- +pbh_table1 nvgre 100 200 +pbh_table2 vxlan 0 0 +""" + + +show_pbh_statistics_after_toggling_rule="""\ +TABLE RULE RX PACKETS COUNT RX BYTES COUNT +---------- ------ ------------------ ---------------- +pbh_table1 nvgre 0 0 +pbh_table2 vxlan 300 400 +""" diff --git a/tests/pbh_input/state_db.json 
b/tests/pbh_input/state_db.json new file mode 100644 index 0000000000..dccc54cf40 --- /dev/null +++ b/tests/pbh_input/state_db.json @@ -0,0 +1,26 @@ +{ + "PBH_CAPABILITIES|table": { + "interface_list": "UPDATE", + "description": "UPDATE" + }, + "PBH_CAPABILITIES|rule": { + "priority": "UPDATE", + "ether_type": "ADD,UPDATE,REMOVE", + "ip_protocol": "ADD,UPDATE,REMOVE", + "ipv6_next_header": "ADD,UPDATE,REMOVE", + "l4_dst_port": "ADD,UPDATE,REMOVE", + "gre_key": "ADD,UPDATE,REMOVE", + "inner_ether_type": "ADD,UPDATE,REMOVE", + "hash": "UPDATE", + "packet_action": "ADD,UPDATE,REMOVE", + "flow_counter": "ADD,UPDATE,REMOVE" + }, + "PBH_CAPABILITIES|hash": { + "hash_field_list": "UPDATE" + }, + "PBH_CAPABILITIES|hash-field": { + "hash_field": "", + "ip_mask": "", + "sequence_id": "" + } +} diff --git a/tests/pbh_test.py b/tests/pbh_test.py index bc4c74db73..1972747782 100644 --- a/tests/pbh_test.py +++ b/tests/pbh_test.py @@ -37,6 +37,7 @@ def teardown_class(cls): os.environ['UTILITIES_UNIT_TESTING'] = "0" os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" dbconnector.dedicated_dbs['CONFIG_DB'] = None + dbconnector.dedicated_dbs['STATE_DB'] = None dbconnector.dedicated_dbs['COUNTERS_DB'] = None @@ -116,7 +117,7 @@ def test_config_pbh_hash_field_add_mismatch_hash_field_ip_mask( logger.debug("\n" + result.output) logger.debug(result.exit_code) - assert result.exit_code == ERROR + assert result.exit_code == ERROR2 def test_config_pbh_hash_field_add_invalid_ip(self): @@ -132,7 +133,7 @@ def test_config_pbh_hash_field_add_invalid_ip(self): logger.debug("\n" + result.output) logger.debug(result.exit_code) - assert result.exit_code == ERROR + assert result.exit_code == ERROR2 @pytest.mark.parametrize("hash_field_name,hash_field", [ @@ -155,7 +156,7 @@ def test_config_pbh_hash_field_add_none_ip_mask( logger.debug("\n" + result.output) logger.debug(result.exit_code) - assert result.exit_code == ERROR + assert result.exit_code == ERROR2 @pytest.mark.parametrize("hash_field_name,hash_field,updated_hash_field,sequence_id", [ @@ -169,6 +170,8 @@ def test_config_pbh_hash_field_update_hash_field_sequence_id_no_ip( updated_hash_field, sequence_id ): + dbconnector.dedicated_dbs['STATE_DB'] = os.path.join(mock_db_path, 'state_db') + db = Db() runner = CliRunner() @@ -190,7 +193,7 @@ def test_config_pbh_hash_field_update_hash_field_sequence_id_no_ip( logger.debug("\n" + result.output) logger.debug(result.exit_code) - assert result.exit_code == SUCCESS + assert result.exit_code == ERROR @pytest.mark.parametrize("hash_field_name,hash_field,updated_hash_field,ip_mask,updated_ip_mask", [ @@ -205,6 +208,8 @@ def test_config_pbh_hash_field_update_hash_field_ip_mask( ip_mask, updated_ip_mask ): + dbconnector.dedicated_dbs['STATE_DB'] = os.path.join(mock_db_path, 'state_db') + db = Db() runner = CliRunner() @@ -227,10 +232,12 @@ def test_config_pbh_hash_field_update_hash_field_ip_mask( logger.debug("\n" + result.output) logger.debug(result.exit_code) - assert result.exit_code == SUCCESS + assert result.exit_code == ERROR def test_config_pbh_hash_field_update_invalid_hash_field(self): + dbconnector.dedicated_dbs['STATE_DB'] = os.path.join(mock_db_path, 'state_db') + db = Db() runner = CliRunner() @@ -252,10 +259,12 @@ def test_config_pbh_hash_field_update_invalid_hash_field(self): logger.debug("\n" + result.output) logger.debug(result.exit_code) - assert result.exit_code == ERROR + assert result.exit_code == ERROR2 def test_config_pbh_hash_field_update_invalid_ipv4_mask(self): + dbconnector.dedicated_dbs['STATE_DB'] = 
os.path.join(mock_db_path, 'state_db') + db = Db() runner = CliRunner() @@ -277,7 +286,7 @@ def test_config_pbh_hash_field_update_invalid_ipv4_mask(self): logger.debug("\n" + result.output) logger.debug(result.exit_code) - assert result.exit_code == ERROR + assert result.exit_code == ERROR2 @pytest.mark.parametrize("hash_field_name,hash_field,ip_mask,updated_ip_mask", [ @@ -291,6 +300,8 @@ def test_config_pbh_hash_field_update_invalid_ip_mask( ip_mask, updated_ip_mask ): + dbconnector.dedicated_dbs['STATE_DB'] = os.path.join(mock_db_path, 'state_db') + db = Db() runner = CliRunner() @@ -313,7 +324,7 @@ def test_config_pbh_hash_field_update_invalid_ip_mask( logger.debug("\n" + result.output) logger.debug(result.exit_code) - assert result.exit_code == ERROR + assert result.exit_code == ERROR2 ########## CONFIG PBH HASH ########## @@ -321,6 +332,7 @@ def test_config_pbh_hash_field_update_invalid_ip_mask( def test_config_pbh_hash_add_delete_ipv4(self): dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'hash_fields') + db = Db() runner = CliRunner() @@ -347,6 +359,8 @@ def test_config_pbh_hash_add_delete_ipv4(self): def test_config_pbh_hash_add_update_ipv6(self): dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'hash_fields') + dbconnector.dedicated_dbs['STATE_DB'] = os.path.join(mock_db_path, 'state_db') + db = Db() runner = CliRunner() @@ -373,8 +387,8 @@ def test_config_pbh_hash_add_update_ipv6(self): assert result.exit_code == SUCCESS @pytest.mark.parametrize("hash_name,hash_field_list,exit_code", [ - ("inner_v6_hash", INVALID_VALUE, ERROR), - ("inner_v6_hash", "", ERROR), + ("inner_v6_hash", INVALID_VALUE, ERROR2), + ("inner_v6_hash", "", ERROR2), ("inner_v6_hash", None, ERROR2) ]) def test_config_pbh_hash_add_invalid_hash_field_list( @@ -384,6 +398,7 @@ def test_config_pbh_hash_add_invalid_hash_field_list( exit_code ): dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'hash_fields') + db = Db() runner = CliRunner() @@ -403,6 +418,7 @@ def test_config_pbh_hash_add_invalid_hash_field_list( def test_config_pbh_table_add_delete_ports(self): dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'table') + db = Db() runner = CliRunner() @@ -428,6 +444,8 @@ def test_config_pbh_table_add_delete_ports(self): def test_config_pbh_table_add_update_portchannels(self): dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'table') + dbconnector.dedicated_dbs['STATE_DB'] = os.path.join(mock_db_path, 'state_db') + db = Db() runner = CliRunner() @@ -474,6 +492,7 @@ def test_config_pbh_table_add_update_portchannels(self): def test_config_pbh_table_add_port_and_portchannel(self): dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'table') + db = Db() runner = CliRunner() @@ -491,6 +510,7 @@ def test_config_pbh_table_add_port_and_portchannel(self): def test_config_pbh_table_add_invalid_port(self): dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'table') + db = Db() runner = CliRunner() @@ -502,11 +522,13 @@ def test_config_pbh_table_add_invalid_port(self): logger.debug("\n" + result.output) logger.debug(result.exit_code) - assert result.exit_code == ERROR + assert result.exit_code == ERROR2 def test_config_pbh_table_add_update_invalid_interface(self): dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'table') + dbconnector.dedicated_dbs['STATE_DB'] = os.path.join(mock_db_path, 'state_db') + db = Db() runner = CliRunner() @@ -528,7 +550,7 @@ def 
test_config_pbh_table_add_update_invalid_interface(self): logger.debug("\n" + result.output) logger.debug(result.exit_code) - assert result.exit_code == ERROR + assert result.exit_code == ERROR2 ########## CONFIG PBH RULE ########## @@ -536,6 +558,7 @@ def test_config_pbh_table_add_update_invalid_interface(self): def test_config_pbh_rule_add_delete_nvgre(self): dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'rule') + db = Db() runner = CliRunner() @@ -563,12 +586,14 @@ def test_config_pbh_rule_add_delete_nvgre(self): def test_config_pbh_rule_add_update_vxlan(self): dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'rule') + dbconnector.dedicated_dbs['STATE_DB'] = os.path.join(mock_db_path, 'state_db') + db = Db() runner = CliRunner() result = runner.invoke( config.config.commands["pbh"].commands["rule"]. - commands["add"], ["pbh_table1", "vxlan ", + commands["add"], ["pbh_table1", "vxlan", "--priority", "2", "--ip-protocol", "0x11", "--inner-ether-type", "0x0800","--l4-dst-port", "0x12b5", "--hash", "inner_v4_hash", "--packet-action", @@ -581,7 +606,8 @@ def test_config_pbh_rule_add_update_vxlan(self): result = runner.invoke( config.config.commands["pbh"].commands["rule"]. - commands["update"], ["pbh_table1", "vxlan ", + commands["update"].commands["field"]. + commands["set"], ["pbh_table1", "vxlan", "--priority", "3", "--inner-ether-type", "0x086d", "--packet-action", "SET_LAG_HASH", "--flow-counter", "DISABLED"], obj=db @@ -592,14 +618,61 @@ def test_config_pbh_rule_add_update_vxlan(self): assert result.exit_code == SUCCESS + def test_config_pbh_rule_update_nvgre_to_vxlan(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'rule') + dbconnector.dedicated_dbs['STATE_DB'] = os.path.join(mock_db_path, 'state_db') + + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["pbh"].commands["rule"]. + commands["add"],["pbh_table1", "nvgre", "--priority", "1", + "--ether-type", "0x0800", "--ip-protocol", "0x2f", + "--gre-key", "0x2500/0xffffff00", "--inner-ether-type", + "0x86dd", "--hash", "inner_v6_hash", "--packet-action", + "SET_ECMP_HASH", "--flow-counter", "DISABLED"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + result = runner.invoke( + config.config.commands["pbh"].commands["rule"]. + commands["update"].commands["field"]. + commands["set"], ["pbh_table1", "nvgre", + "--ether-type", "0x86dd", "--ipv6-next-header", "0x11", + "--l4-dst-port", "0x12b5", "--inner-ether-type", "0x0800", + "--hash", "inner_v4_hash"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + result = runner.invoke( + config.config.commands["pbh"].commands["rule"]. + commands["update"].commands["field"]. + commands["del"], ["pbh_table1", "nvgre", + "--ip-protocol", "--gre-key", + "--packet-action", "--flow-counter"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + + def test_config_pbh_rule_update_invalid(self): dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'rule') + db = Db() runner = CliRunner() result = runner.invoke( config.config.commands["pbh"].commands["rule"]. 
- commands["add"], ["pbh_table1", "vxlan ", "--priority", + commands["add"], ["pbh_table1", "vxlan", "--priority", "2", "--ip-protocol", "0x11", "--inner-ether-type", "0x0800", "--l4-dst-port", "0x12b5", "--hash", "inner_v6_hash", "--packet-action", "SET_ECMP_HASH", @@ -612,7 +685,7 @@ def test_config_pbh_rule_update_invalid(self): result = runner.invoke( config.config.commands["pbh"].commands["rule"]. - commands["update"], ["pbh_table1", "vxlan ", + commands["update"], ["pbh_table1", "vxlan", "--flow-counter", INVALID_VALUE], obj=db ) @@ -623,12 +696,13 @@ def test_config_pbh_rule_update_invalid(self): def test_config_pbh_rule_add_invalid_ip_protocol(self): dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'rule') + db = Db() runner = CliRunner() result = runner.invoke( config.config.commands["pbh"].commands["rule"]. - commands["add"], ["pbh_table1", "vxlan ", "--priority", + commands["add"], ["pbh_table1", "vxlan", "--priority", "2", "--ip-protocol", INVALID_VALUE, "--inner-ether-type", "0x0800", "--l4-dst-port", "0x12b5", "--hash", "inner_v6_hash", "--packet-action", "SET_ECMP_HASH", "--flow-counter", @@ -637,17 +711,18 @@ def test_config_pbh_rule_add_invalid_ip_protocol(self): logger.debug("\n" + result.output) logger.debug(result.exit_code) - assert result.exit_code == ERROR + assert result.exit_code == ERROR2 def test_config_pbh_rule_add_invalid_inner_ether_type(self): dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'rule') + db = Db() runner = CliRunner() result = runner.invoke( config.config.commands["pbh"].commands["rule"]. - commands["add"], ["pbh_table1", "vxlan ", "--priority", + commands["add"], ["pbh_table1", "vxlan", "--priority", "2", "--ip-protocol", "0x11", "--inner-ether-type", INVALID_VALUE, "--l4-dst-port", "0x12b5", "--hash", "inner_v6_hash", "--packet-action", "SET_ECMP_HASH", @@ -656,17 +731,18 @@ def test_config_pbh_rule_add_invalid_inner_ether_type(self): logger.debug("\n" + result.output) logger.debug(result.exit_code) - assert result.exit_code == ERROR + assert result.exit_code == ERROR2 def test_config_pbh_rule_add_invalid_hash(self): dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'rule') + db = Db() runner = CliRunner() result = runner.invoke( config.config.commands["pbh"].commands["rule"]. - commands["add"], ["pbh_table1", "vxlan ", "--priority", + commands["add"], ["pbh_table1", "vxlan", "--priority", "2", "--ip-protocol", "0x11", "--inner-ether-type", "0x0800", "--l4-dst-port", "0x12b5", "--hash", INVALID_VALUE, "--packet-action", "SET_ECMP_HASH", "--flow-counter", @@ -675,17 +751,18 @@ def test_config_pbh_rule_add_invalid_hash(self): logger.debug("\n" + result.output) logger.debug(result.exit_code) - assert result.exit_code == ERROR + assert result.exit_code == ERROR2 def test_config_pbh_rule_add_invalid_packet_action(self): dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'rule') + db = Db() runner = CliRunner() result = runner.invoke( config.config.commands["pbh"].commands["rule"]. 
- commands["add"], ["pbh_table1", "vxlan ", "--priority", + commands["add"], ["pbh_table1", "vxlan", "--priority", "2", "--ip-protocol", "0x11", "--inner-ether-type", "0x0800", "--l4-dst-port", "0x12b5", "--hash", "inner_v6_hash", "--packet-action", INVALID_VALUE, @@ -699,12 +776,13 @@ def test_config_pbh_rule_add_invalid_packet_action(self): def test_config_pbh_rule_add_invalid_flow_counter(self): dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'rule') + db = Db() runner = CliRunner() result = runner.invoke( config.config.commands["pbh"].commands["rule"]. - commands["add"], ["pbh_table1", "vxlan ", "--priority", + commands["add"], ["pbh_table1", "vxlan", "--priority", "2", "--ip-protocol", "0x11", "--inner-ether-type", "0x0800", "--l4-dst-port", "0x12b5", "--hash", "inner_v6_hash", "--packet-action", "SET_ECMP_HASH", @@ -719,6 +797,7 @@ def test_config_pbh_rule_add_invalid_flow_counter(self): def test_show_pbh_hash_field(self): dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'full_pbh_config') + db = Db() runner = CliRunner() @@ -738,6 +817,7 @@ def test_show_pbh_hash_field(self): def test_show_pbh_hash(self): dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'full_pbh_config') + db = Db() runner = CliRunner() @@ -757,6 +837,7 @@ def test_show_pbh_hash(self): def test_show_pbh_table(self): dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'full_pbh_config') + db = Db() runner = CliRunner() @@ -776,6 +857,7 @@ def test_show_pbh_table(self): def test_show_pbh_rule(self): dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'full_pbh_config') + db = Db() runner = CliRunner() @@ -792,14 +874,19 @@ def test_show_pbh_rule(self): ########## SHOW PBH STATISTICS ########## - def test_show_pbh_statistics_on_empty_config(self): - dbconnector.dedicated_dbs['CONFIG_DB'] = None - dbconnector.dedicated_dbs['COUNTERS_DB'] = None + def remove_pbh_counters_file(self): SAVED_PBH_COUNTERS_FILE = '/tmp/.pbh_counters.txt' if os.path.isfile(SAVED_PBH_COUNTERS_FILE): os.remove(SAVED_PBH_COUNTERS_FILE) + + def test_show_pbh_statistics_on_empty_config(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = None + dbconnector.dedicated_dbs['COUNTERS_DB'] = None + + self.remove_pbh_counters_file() + db = Db() runner = CliRunner() @@ -818,9 +905,7 @@ def test_show_pbh_statistics(self): dbconnector.dedicated_dbs['COUNTERS_DB'] = os.path.join(mock_db_path, 'counters_db') dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'full_pbh_config') - SAVED_PBH_COUNTERS_FILE = '/tmp/.pbh_counters.txt' - if os.path.isfile(SAVED_PBH_COUNTERS_FILE): - os.remove(SAVED_PBH_COUNTERS_FILE) + self.remove_pbh_counters_file() db = Db() runner = CliRunner() @@ -840,9 +925,7 @@ def test_show_pbh_statistics_after_clear(self): dbconnector.dedicated_dbs['COUNTERS_DB'] = os.path.join(mock_db_path, 'counters_db') dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'full_pbh_config') - SAVED_PBH_COUNTERS_FILE = '/tmp/.pbh_counters.txt' - if os.path.isfile(SAVED_PBH_COUNTERS_FILE): - os.remove(SAVED_PBH_COUNTERS_FILE) + self.remove_pbh_counters_file() db = Db() runner = CliRunner() @@ -870,9 +953,7 @@ def test_show_pbh_statistics_after_clear_and_counters_updated(self): dbconnector.dedicated_dbs['COUNTERS_DB'] = os.path.join(mock_db_path, 'counters_db') dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'full_pbh_config') - SAVED_PBH_COUNTERS_FILE = '/tmp/.pbh_counters.txt' - if 
os.path.isfile(SAVED_PBH_COUNTERS_FILE): - os.remove(SAVED_PBH_COUNTERS_FILE) + self.remove_pbh_counters_file() db = Db() runner = CliRunner() @@ -901,10 +982,9 @@ def test_show_pbh_statistics_after_clear_and_counters_updated(self): def test_show_pbh_statistics_after_disabling_rule(self): dbconnector.dedicated_dbs['COUNTERS_DB'] = os.path.join(mock_db_path, 'counters_db') dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'full_pbh_config') + dbconnector.dedicated_dbs['STATE_DB'] = os.path.join(mock_db_path, 'state_db') - SAVED_PBH_COUNTERS_FILE = '/tmp/.pbh_counters.txt' - if os.path.isfile(SAVED_PBH_COUNTERS_FILE): - os.remove(SAVED_PBH_COUNTERS_FILE) + self.remove_pbh_counters_file() db = Db() runner = CliRunner() @@ -919,7 +999,8 @@ def test_show_pbh_statistics_after_disabling_rule(self): result = runner.invoke( config.config.commands["pbh"]. - commands["rule"].commands["update"], + commands["rule"].commands["update"]. + commands["field"].commands["set"], ["pbh_table2", "vxlan", "--flow-counter", "DISABLED"], obj=db ) @@ -937,3 +1018,102 @@ def test_show_pbh_statistics_after_disabling_rule(self): assert result.exit_code == SUCCESS assert result.output == assert_show_output.show_pbh_statistics_after_disabling_rule + + def test_show_pbh_statistics_after_flow_counter_toggle(self): + dbconnector.dedicated_dbs['COUNTERS_DB'] = os.path.join(mock_db_path, 'counters_db') + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'full_pbh_config') + dbconnector.dedicated_dbs['STATE_DB'] = os.path.join(mock_db_path, 'state_db') + + self.remove_pbh_counters_file() + + db = Db() + runner = CliRunner() + + result = runner.invoke( + clear.cli.commands["pbh"]. + commands["statistics"], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + + result = runner.invoke( + config.config.commands["pbh"]. + commands["rule"].commands["update"]. + commands["field"].commands["set"], + ["pbh_table1", "nvgre", "--flow-counter", + "DISABLED"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + + result = runner.invoke( + config.config.commands["pbh"]. + commands["rule"].commands["update"]. + commands["field"].commands["set"], + ["pbh_table1", "nvgre", "--flow-counter", + "ENABLED"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + + result = runner.invoke( + show.cli.commands["pbh"]. + commands["statistics"], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + assert result.output == assert_show_output.show_pbh_statistics_after_toggling_counter + + + def test_show_pbh_statistics_after_rule_toggle(self): + dbconnector.dedicated_dbs['COUNTERS_DB'] = os.path.join(mock_db_path, 'counters_db') + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'full_pbh_config') + + self.remove_pbh_counters_file() + + db = Db() + runner = CliRunner() + + result = runner.invoke( + clear.cli.commands["pbh"]. + commands["statistics"], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + + result = runner.invoke( + config.config.commands["pbh"]. + commands["rule"].commands["delete"], + ["pbh_table2", "vxlan"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + + result = runner.invoke( + config.config.commands["pbh"].commands["rule"]. 
+ commands["add"], ["pbh_table2", "vxlan", "--priority", + "2", "--ip-protocol", "0x11", "--inner-ether-type", + "0x0800", "--l4-dst-port", "0x12b5", "--hash", + "inner_v4_hash", "--packet-action", "SET_LAG_HASH", + "--flow-counter", "ENABLED"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + + result = runner.invoke( + show.cli.commands["pbh"]. + commands["statistics"], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + assert result.output == assert_show_output.show_pbh_statistics_after_toggling_rule diff --git a/utilities_common/db.py b/utilities_common/db.py index d736aa1be7..136e1fc91e 100644 --- a/utilities_common/db.py +++ b/utilities_common/db.py @@ -1,5 +1,5 @@ from sonic_py_common import multi_asic, device_info -from swsscommon.swsscommon import ConfigDBConnector, SonicV2Connector +from swsscommon.swsscommon import ConfigDBConnector, ConfigDBPipeConnector, SonicV2Connector from utilities_common import constants from utilities_common.multi_asic import multi_asic_ns_choices @@ -10,6 +10,8 @@ def __init__(self): self.db_clients = {} self.cfgdb = ConfigDBConnector() self.cfgdb.connect() + self.cfgdb_pipe = ConfigDBPipeConnector() + self.cfgdb_pipe.connect() self.db = SonicV2Connector(host="127.0.0.1") # Skip connecting to chassis databases in line cards From 65a5a6b67730b74f9a0cec7b9bf17115fe7019c6 Mon Sep 17 00:00:00 2001 From: Sudharsan Dhamal Gopalarathnam Date: Wed, 30 Mar 2022 13:29:53 -0700 Subject: [PATCH 04/11] Fixing get port speed when oper status is down (#2123) * Fixing get port speed when oper status is down Signed-off-by: Sudharsan Dhamal Gopalarathnam * Removing redundant if condition --- scripts/intfutil | 6 ++++-- scripts/portstat | 3 ++- scripts/voqutil | 5 ++++- tests/dump_tests/dump_state_test.py | 7 ++++--- tests/mock_tables/state_db.json | 1 + 5 files changed, 15 insertions(+), 7 deletions(-) diff --git a/scripts/intfutil b/scripts/intfutil index 1a42640529..5d8e743262 100755 --- a/scripts/intfutil +++ b/scripts/intfutil @@ -154,7 +154,8 @@ def port_oper_speed_get(db, intf_name): Get port oper speed """ oper_speed = db.get(db.STATE_DB, PORT_STATE_TABLE_PREFIX + intf_name, PORT_SPEED) - if oper_speed is None or oper_speed == "N/A": + oper_status = db.get(db.APPL_DB, PORT_STATUS_TABLE_PREFIX + intf_name, PORT_OPER_STATUS) + if oper_speed is None or oper_speed == "N/A" or oper_status != "up": return appl_db_port_status_get(db, intf_name, PORT_SPEED) return '{}G'.format(oper_speed[:-3]) @@ -164,7 +165,8 @@ def port_oper_speed_get_raw(db, intf_name): Get port raw speed. E.g. 100000, 50000 and so on. 
""" speed = db.get(db.STATE_DB, PORT_STATE_TABLE_PREFIX + intf_name, PORT_SPEED) - if speed is None or speed == "N/A": + oper_status = db.get(db.APPL_DB, PORT_STATUS_TABLE_PREFIX + intf_name, PORT_OPER_STATUS) + if speed is None or speed == "N/A" or oper_status != "up": speed = db.get(db.APPL_DB, PORT_STATUS_TABLE_PREFIX + intf_name, PORT_SPEED) return speed diff --git a/scripts/portstat b/scripts/portstat index e1ca4a6fb1..24c085e9c1 100755 --- a/scripts/portstat +++ b/scripts/portstat @@ -216,7 +216,8 @@ class Portstat(object): for ns in self.multi_asic.get_ns_list_based_on_options(): self.db = multi_asic.connect_to_all_dbs_for_ns(ns) speed = self.db.get(self.db.STATE_DB, state_db_table_id, PORT_SPEED_FIELD) - if speed is None or speed == STATUS_NA: + oper_status = self.db.get(self.db.APPL_DB, app_db_table_id, PORT_OPER_STATUS_FIELD) + if speed is None or speed == STATUS_NA or oper_status != "up": speed = self.db.get(self.db.APPL_DB, app_db_table_id, PORT_SPEED_FIELD) if speed is not None: return int(speed) diff --git a/scripts/voqutil b/scripts/voqutil index 88637f3ec3..3a724d1255 100755 --- a/scripts/voqutil +++ b/scripts/voqutil @@ -38,6 +38,7 @@ SYSTEM_PORT_CORE = "core_index" SYSTEM_PORT_CORE_PORT = "core_port_index" SYSTEM_PORT_SPEED = "speed" SYSTEM_PORT_SWITCH_ID = "switch_id" +SYSTEM_PORT_OPER_STATUS = "oper_status" SYSTEM_NEIGH_TABLE_PREFIX = swsscommon.CHASSIS_APP_SYSTEM_NEIGH_TABLE_NAME + "|" SYSTEM_NEIGH_MAC = "neigh" @@ -116,7 +117,9 @@ def port_oper_speed_get(db, system_port_name): """ full_table_id = SYSTEM_STATE_PORT_TABLE_PREFIX + system_port_name oper_speed = db.get(db.STATE_DB, full_table_id, SYSTEM_PORT_SPEED) - if oper_speed is None or oper_speed == "N/A": + oper_status = db.get(db.APPL_DB, SYSTEM_PORT_TABLE_PREFIX + system_port_name, + SYSTEM_PORT_OPER_STATUS) + if oper_speed is None or oper_speed == "N/A" or oper_status != "up": return appl_db_system_port_status_get(db, system_port_name, SYSTEM_PORT_SPEED) return '{}G'.format(oper_speed[:-3]) diff --git a/tests/dump_tests/dump_state_test.py b/tests/dump_tests/dump_state_test.py index 4eb948ad72..fb1a211056 100644 --- a/tests/dump_tests/dump_state_test.py +++ b/tests/dump_tests/dump_state_test.py @@ -34,6 +34,7 @@ def compare_json_output(exp_json, rec, exclude_paths=None): | | | | PORT_TABLE|Ethernet0 | +------------------+--------------------------+ | | | | | | | | field | value | | | | | | | | |------------------+--------------------------| | | +| | | | | | speed | 100000 | | | | | | | | | supported_speeds | 10000,25000,40000,100000 | | | | | | | | +------------------+--------------------------+ | | | | | +----------------------+-------------------------------------------------+ | @@ -119,7 +120,7 @@ def test_identifier_single(self): expected = {'Ethernet0': {'CONFIG_DB': {'keys': [{'PORT|Ethernet0': {'alias': 'etp1', 'description': 'etp1', 'index': '0', 'lanes': '25,26,27,28', 'mtu': '9100', 'pfc_asym': 'off', 'speed': '40000'}}], 'tables_not_found': []}, 'APPL_DB': {'keys': [{'PORT_TABLE:Ethernet0': {'index': '0', 'lanes': '0', 'alias': 'Ethernet0', 'description': 'ARISTA01T2:Ethernet1', 'speed': '25000', 'oper_status': 'down', 'pfc_asym': 'off', 'mtu': '9100', 'fec': 'rs', 'admin_status': 'up'}}], 'tables_not_found': []}, 'ASIC_DB': {'keys': [{'ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF:oid:0xd00000000056d': {'SAI_HOSTIF_ATTR_NAME': 'Ethernet0', 'SAI_HOSTIF_ATTR_OBJ_ID': 'oid:0x10000000004a4', 'SAI_HOSTIF_ATTR_OPER_STATUS': 'true', 'SAI_HOSTIF_ATTR_TYPE': 'SAI_HOSTIF_TYPE_NETDEV', 'SAI_HOSTIF_ATTR_VLAN_TAG': 
'SAI_HOSTIF_VLAN_TAG_STRIP'}}, {'ASIC_STATE:SAI_OBJECT_TYPE_PORT:oid:0x10000000004a4': {'NULL': 'NULL', 'SAI_PORT_ATTR_ADMIN_STATE': 'true', 'SAI_PORT_ATTR_MTU': '9122', 'SAI_PORT_ATTR_SPEED': '100000'}}], 'tables_not_found': [], 'vidtorid': {'oid:0xd00000000056d': 'oid:0xd', 'oid:0x10000000004a4': 'oid:0x1690000000001'}}, - 'STATE_DB': {'keys': [{'PORT_TABLE|Ethernet0': {'supported_speeds': '10000,25000,40000,100000'}}], 'tables_not_found': []}}} + 'STATE_DB': {'keys': [{'PORT_TABLE|Ethernet0': {'speed': '100000', 'supported_speeds': '10000,25000,40000,100000'}}], 'tables_not_found': []}}} assert result.exit_code == 0, "exit code: {}, Exception: {}, Traceback: {}".format(result.exit_code, result.exception, result.exc_info) # Cause other tests depend and change these paths in the mock_db, this test would fail everytime when a field or a value in changed in this path, creating noise @@ -136,7 +137,7 @@ def test_identifier_multiple(self): {"CONFIG_DB": {"keys": [{"PORT|Ethernet0": {"alias": "etp1", "description": "etp1", "index": "0", "lanes": "25,26,27,28", "mtu": "9100", "pfc_asym": "off", "speed": "40000"}}], "tables_not_found": []}, "APPL_DB": {"keys": [{"PORT_TABLE:Ethernet0": {"index": "0", "lanes": "0", "alias": "Ethernet0", "description": "ARISTA01T2:Ethernet1", "speed": "25000", "oper_status": "down", "pfc_asym": "off", "mtu": "9100", "fec": "rs", "admin_status": "up"}}], "tables_not_found": []}, "ASIC_DB": {"keys": [{"ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF:oid:0xd00000000056d": {"SAI_HOSTIF_ATTR_NAME": "Ethernet0", "SAI_HOSTIF_ATTR_OBJ_ID": "oid:0x10000000004a4", "SAI_HOSTIF_ATTR_OPER_STATUS": "true", "SAI_HOSTIF_ATTR_TYPE": "SAI_HOSTIF_TYPE_NETDEV", "SAI_HOSTIF_ATTR_VLAN_TAG": "SAI_HOSTIF_VLAN_TAG_STRIP"}}, {"ASIC_STATE:SAI_OBJECT_TYPE_PORT:oid:0x10000000004a4": {"NULL": "NULL", "SAI_PORT_ATTR_ADMIN_STATE": "true", "SAI_PORT_ATTR_MTU": "9122", "SAI_PORT_ATTR_SPEED": "100000"}}], "tables_not_found": [], "vidtorid": {"oid:0xd00000000056d": "oid:0xd", "oid:0x10000000004a4": "oid:0x1690000000001"}}, - "STATE_DB": {"keys": [{"PORT_TABLE|Ethernet0": {"supported_speeds": "10000,25000,40000,100000"}}], "tables_not_found": []}}, + "STATE_DB": {"keys": [{"PORT_TABLE|Ethernet0": {"speed": "100000", "supported_speeds": "10000,25000,40000,100000"}}], "tables_not_found": []}}, "Ethernet4": {"CONFIG_DB": {"keys": [{"PORT|Ethernet4": {"admin_status": "up", "alias": "etp2", "description": "Servers0:eth0", "index": "1", "lanes": "29,30,31,32", "mtu": "9100", "pfc_asym": "off", "speed": "40000"}}], "tables_not_found": []}, "APPL_DB": {"keys": [], "tables_not_found": ["PORT_TABLE"]}, @@ -165,7 +166,7 @@ def test_option_db_filtering(self): result = runner.invoke(dump.state, ["port", "Ethernet0", "--db", "ASIC_DB", "--db", "STATE_DB"]) print(result.output) expected = {"Ethernet0": {"ASIC_DB": {"keys": [{"ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF:oid:0xd00000000056d": {"SAI_HOSTIF_ATTR_NAME": "Ethernet0", "SAI_HOSTIF_ATTR_OBJ_ID": "oid:0x10000000004a4", "SAI_HOSTIF_ATTR_OPER_STATUS": "true", "SAI_HOSTIF_ATTR_TYPE": "SAI_HOSTIF_TYPE_NETDEV", "SAI_HOSTIF_ATTR_VLAN_TAG": "SAI_HOSTIF_VLAN_TAG_STRIP"}}, {"ASIC_STATE:SAI_OBJECT_TYPE_PORT:oid:0x10000000004a4": {"NULL": "NULL", "SAI_PORT_ATTR_ADMIN_STATE": "true", "SAI_PORT_ATTR_MTU": "9122", "SAI_PORT_ATTR_SPEED": "100000"}}], "tables_not_found": [], "vidtorid": {"oid:0xd00000000056d": "oid:0xd", "oid:0x10000000004a4": "oid:0x1690000000001"}}, - "STATE_DB": {"keys": [{"PORT_TABLE|Ethernet0": {"supported_speeds": "10000,25000,40000,100000"}}], "tables_not_found": []}}} + 
"STATE_DB": {"keys": [{"PORT_TABLE|Ethernet0": {"speed": "100000", "supported_speeds": "10000,25000,40000,100000"}}], "tables_not_found": []}}} assert result.exit_code == 0, "exit code: {}, Exception: {}, Traceback: {}".format(result.exit_code, result.exception, result.exc_info) ddiff = compare_json_output(expected, result.output) assert not ddiff, ddiff diff --git a/tests/mock_tables/state_db.json b/tests/mock_tables/state_db.json index 6145868a9b..1cd881ac71 100644 --- a/tests/mock_tables/state_db.json +++ b/tests/mock_tables/state_db.json @@ -643,6 +643,7 @@ "access": "False" }, "PORT_TABLE|Ethernet0": { + "speed" : "100000", "supported_speeds": "10000,25000,40000,100000" }, "PORT_TABLE|Ethernet112": { From 6d3aa1e79d853493f06174962f6e510dcf55d7df Mon Sep 17 00:00:00 2001 From: Mohamed Ghoneim Date: Fri, 1 Apr 2022 09:19:08 -0700 Subject: [PATCH 05/11] [GCU] Optimizing moves by adding generators for keys/tables (#2120) #### What I did Fixes #2099 Grouping all fields under tables/keys to be added/deleted in 1 JsonChange. **BEFORE** how did we generate moves to try? - Start with the low level differences, low level means differences that has no children - Then extend this low level by different extenders, the most used is the `UpperLevelMoveExtender` - Extended the already extended moves to go upper in level, and do that multiple times until there are no possible moves to extend The diagram below shows: - Generation (low level): ```json {"op":"replace", "path":"/ACL_RULE/SNMP_ACL|RULE_1/SRC_IP", "value": "1.1.1.1/21"} {"op":"remove", "path":"/ACL_RULE/SNMP_ACL|RULE_2/PRIORITY"} {"op":"remove", "path":"/ACL_RULE/SNMP_ACL|RULE_2/SRC_IP"} {"op":"remove", "path":"/ACL_RULE/SNMP_ACL|RULE_2/IP_PROTOCOL"} {"op":"remove", "path":"/ACL_RULE/SNMP_ACL|RULE_2/PACKET_ACTION"} {"op":"add", "path":"/ACL_RULE/SNMP_ACL|RULE_3/PRIORITY", "value": "9997"} {"op":"add", "path":"/ACL_RULE/SNMP_ACL|RULE_3/SRC_IP", "value": "3.3.3.3/20"} {"op":"add", "path":"/ACL_RULE/SNMP_ACL|RULE_3/IP_PROTOCOL", "value": "17"} {"op":"add", "path":"/ACL_RULE/SNMP_ACL|RULE_3/PACKET_ACTION", "value": "ACCEPT"} ``` - Extension - 1st level ```json {"op":"replace", "path":"/ACL_RULE/SNMP_ACL|RULE_1", "value": {"PRIORITY": "9999","SRC_IP": "2.2.2.2/21","SRC_IP": "1.1.1.1/21","IP_PROTOCOL": "17","PACKET_ACTION": "ACCEPT"}} {"op":"remove", "path":"/ACL_RULE/SNMP_ACL|RULE_2"} {"op":"add", "path":"/ACL_RULE/SNMP_ACL|RULE_3", "value": {"PRIORITY": "9997","SRC_IP": "3.3.3.3/20","IP_PROTOCOL": "17","PACKET_ACTION": "ACCEPT"}} ``` - Extension - 2nd level ```json {"op":"replace", "path":"/ACL_RULE", "value": {"SNMP_ACL|RULE_1": {"PRIORITY": "9999","SRC_IP": "1.1.1.1/21","IP_PROTOCOL": "17","PACKET_ACTION": "ACCEPT"},"SNMP_ACL|RULE_3": {"PRIORITY": "9997","SRC_IP": "3.3.3.3/20","IP_PROTOCOL": "17","PACKET_ACTION": "ACCEPT"},"SNMP_ACL|DEFAULT_RULE": {"PRIORITY": "1","ETHER_TYPE": "2048","PACKET_ACTION": "DROP"}}} ``` - Extension - 3rd level ```json {"op":"replace", "path":"", "value": {"ACL_RULE": {"SNMP_ACL|RULE_1": {"PRIORITY": "9999","SRC_IP": "1.1.1.1/21","IP_PROTOCOL": "17","PACKET_ACTION": "ACCEPT"},"SNMP_ACL|RULE_3": {"PRIORITY": "9997","SRC_IP": "3.3.3.3/20","IP_PROTOCOL": "17","PACKET_ACTION": "ACCEPT"},"SNMP_ACL|DEFAULT_RULE": {"PRIORITY": "1","ETHER_TYPE": "2048","PACKET_ACTION": "DROP"}},"ACL_TABLE": {"SNMP_ACL": {"type": "CTRLPLANE","policy_desc": "SNMP_ACL","services": ["SNMP"]}}}} ``` ![image](https://user-images.githubusercontent.com/3927651/160676723-29688656-3382-4247-8358-cb988cda5134.png) **AFTER** In this PR, we introduce a 
different kind of generator: the non-extendable generators, i.e. generators whose moves do not get extended by the extenders. We added two generators:
- KeyLevelGenerator: if a whole key is to be deleted or added, do that directly.
- TableLevelGenerator: if a whole table is to be deleted or added, do that directly.

Only **KeyLevelGenerator** is enabled; to enable **TableLevelGenerator** we have to confirm with Renuka whether it is possible to update a whole table at once in the `ChangeApplier` (instead of just keys, as it is today). We have to be able to update a table as a whole because within the table there can be internal dependencies between the objects, so we have to guarantee that all objects are deleted together. For keys, the `ChangeApplier` already guarantees that a whole key is updated at once.

**Why non-extendable generators?**
The non-extendable generators are called before the extendable ones; it is like checking for the low-hanging fruit first. If we used the same extenders for these new generators, we would always go up the config tree. Imagine a move that tries to update a key but fails, and then we call the extenders on this move. The extenders would go up the config tree to the table level, first trying to replace the table and then trying to delete it. Deleting the table is a problem because it affects more objects than intended and thus violates the minimum disruption guarantee. We decided to leave the extenders to the LowLevelMoveGenerator only, making sure we try all possible moves from the lowest levels and go up from there.

Another way to look at it is like this:
- Non-extendable generators do a top-down search: check tables, then keys, in this order
- Extendable generators and extenders do a bottom-up search: check fields, keys, tables, in this order

#### How I did it
Modified the `MoveWrapper.generate` method to allow for different types of move generators.

#### How to verify it
Unit-test

Please check `tests/generic_config_updater/files/patch_sorter_test_success.json` to see how the generated moves became much more compact.

#### Previous command output (if the output of a command-line utility has changed)

#### New command output (if the output of a command-line utility has changed)
---
 generic_config_updater/patch_sorter.py        | 116 ++-
 .../files/patch_sorter_test_success.json      | 760 +++++++-----------
 .../patch_sorter_test.py                      | 240 +++++-
 3 files changed, 624 insertions(+), 492 deletions(-)

diff --git a/generic_config_updater/patch_sorter.py b/generic_config_updater/patch_sorter.py
index e89c542e73..97db21b24e 100644
--- a/generic_config_updater/patch_sorter.py
+++ b/generic_config_updater/patch_sorter.py
@@ -290,29 +290,52 @@ def __hash__(self):
         return hash((self.op_type, self.path, json.dumps(self.value)))
 
 class MoveWrapper:
-    def __init__(self, move_generators, move_extenders, move_validators):
+    def __init__(self, move_generators, move_non_extendable_generators, move_extenders, move_validators):
         self.move_generators = move_generators
+        self.move_non_extendable_generators = move_non_extendable_generators
         self.move_extenders = move_extenders
         self.move_validators = move_validators
 
     def generate(self, diff):
+        """
+        Generates all possible moves to help transform diff.current_config to diff.target_config.
+
+        It starts by generating the non-extendable moves i.e. moves that will not be extended to e.g. change its parent.
+        The non-extendable moves are mostly high level moves such as deleting/adding whole tables.
+
+        After that it generates extendable moves i.e. moves that can be extended to e.g. change its parent.
+        The extendable moves are typically very low level moves that can achieve the minimum disruption guarantee.
+
+        Lastly, the moves are extended, for example to try to replace the parent config instead, or to delete
+        the dependencies of the config.
+        """
         processed_moves = set()
+        extended_moves = set()
         moves = deque([])
 
+        for move in self._generate_non_extendable_moves(diff):
+            if not(move in processed_moves):
+                processed_moves.add(move)
+                yield move
+
         for move in self._generate_moves(diff):
-            if move in processed_moves:
-                continue
-            processed_moves.add(move)
-            yield move
-            moves.extend(self._extend_moves(move, diff))
+            if not(move in processed_moves):
+                processed_moves.add(move)
+                yield move
+
+            if not(move in extended_moves):
+                extended_moves.add(move)
+                moves.extend(self._extend_moves(move, diff))
 
         while moves:
             move = moves.popleft()
-            if move in processed_moves:
-                continue
-            processed_moves.add(move)
-            yield move
-            moves.extend(self._extend_moves(move, diff))
+            if not(move in processed_moves):
+                processed_moves.add(move)
+                yield move
+
+            if not(move in extended_moves):
+                extended_moves.add(move)
+                moves.extend(self._extend_moves(move, diff))
 
     def validate(self, move, diff):
         for validator in self.move_validators:
@@ -328,6 +351,11 @@ def _generate_moves(self, diff):
             for move in generator.generate(diff):
                 yield move
 
+    def _generate_non_extendable_moves(self, diff):
+        for generator in self.move_non_extendable_generators:
+            for move in generator.generate(diff):
+                yield move
+
     def _extend_moves(self, move, diff):
         for extender in self.move_extenders:
             for newmove in extender.extend(move, diff):
@@ -921,6 +949,68 @@ def validate(self, move, diff):
 
         return True
 
+class TableLevelMoveGenerator:
+    """
+    A class that generates table level moves. The item name at the root level of ConfigDB is called 'Table'.
+
+    e.g.
+    {
+        "Table": ...
+    }
+
+    This class will generate moves to remove tables if they are in current, but not target. It also adds tables
+    if they are in target but not current configs.
+    """
+
+    def generate(self, diff):
+        # Removing tables in current but not target
+        for tokens in self._get_non_existing_tables_tokens(diff.current_config, diff.target_config):
+            yield JsonMove(diff, OperationType.REMOVE, tokens)
+
+        # Adding tables in target but not current
+        for tokens in self._get_non_existing_tables_tokens(diff.target_config, diff.current_config):
+            yield JsonMove(diff, OperationType.ADD, tokens, tokens)
+
+    def _get_non_existing_tables_tokens(self, config1, config2):
+        for table in config1:
+            if not(table in config2):
+                yield [table]
+
+class KeyLevelMoveGenerator:
+    """
+    A class that generates key level moves. The item name at the root level of ConfigDB is called 'Table', the item
+    name in the Table level of ConfigDB is called 'Key'.
+
+    e.g.
+    {
+        "Table": {
+            "Key": ...
+        }
+    }
+
+    This class will generate moves to remove keys if they are in current, but not target. It also adds keys
+    if they are in target but not current configs.
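+
+    For example (an illustrative sketch only; the PORT/Ethernet names are borrowed from the tests in this
+    patch, not defined here): given
+
+        current_config = {"PORT": {"Ethernet0": {...}, "Ethernet4": {...}}}
+        target_config  = {"PORT": {"Ethernet0": {...}}}
+
+    this generator yields a move equivalent to {"op": "remove", "path": "/PORT/Ethernet4"}. If "Ethernet4"
+    were the only key under "PORT", the whole "PORT" table would be removed instead, because empty tables
+    are not allowed in ConfigDB. A key present only in the target config yields a corresponding "add" move
+    carrying that key's full value.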
+ """ + def generate(self, diff): + # Removing keys in current but not target + for tokens in self._get_non_existing_keys_tokens(diff.current_config, diff.target_config): + table = tokens[0] + # if table has a single key, delete the whole table because empty tables are not allowed in ConfigDB + if len(diff.current_config[table]) == 1: + yield JsonMove(diff, OperationType.REMOVE, [table]) + else: + yield JsonMove(diff, OperationType.REMOVE, tokens) + + # Adding keys in target but not current + for tokens in self._get_non_existing_keys_tokens(diff.target_config, diff.current_config): + yield JsonMove(diff, OperationType.ADD, tokens, tokens) + + def _get_non_existing_keys_tokens(self, config1, config2): + for table in config1: + for key in config1[table]: + if not(table in config2) or not (key in config2[table]): + yield [table, key] + class LowLevelMoveGenerator: """ A class to generate the low level moves i.e. moves corresponding to differences between current/target config @@ -1407,6 +1497,8 @@ def __init__(self, operation_wrapper, config_wrapper, path_addressing): def create(self, algorithm=Algorithm.DFS): move_generators = [LowLevelMoveGenerator(self.path_addressing)] + # TODO: Enable TableLevelMoveGenerator once it is confirmed whole table can be updated at the same time + move_non_extendable_generators = [KeyLevelMoveGenerator()] move_extenders = [RequiredValueMoveExtender(self.path_addressing, self.operation_wrapper), UpperLevelMoveExtender(), DeleteInsteadOfReplaceMoveExtender(), @@ -1419,7 +1511,7 @@ def create(self, algorithm=Algorithm.DFS): RequiredValueMoveValidator(self.path_addressing), NoEmptyTableMoveValidator(self.path_addressing)] - move_wrapper = MoveWrapper(move_generators, move_extenders, move_validators) + move_wrapper = MoveWrapper(move_generators, move_non_extendable_generators, move_extenders, move_validators) if algorithm == Algorithm.DFS: sorter = DfsSorter(move_wrapper) diff --git a/tests/generic_config_updater/files/patch_sorter_test_success.json b/tests/generic_config_updater/files/patch_sorter_test_success.json index 894f68896c..c134b0b5e2 100644 --- a/tests/generic_config_updater/files/patch_sorter_test_success.json +++ b/tests/generic_config_updater/files/patch_sorter_test_success.json @@ -169,16 +169,10 @@ "op": "add", "path": "/INTERFACE/Ethernet8|10.0.0.1~130", "value": { - "family": "IPv4" + "family": "IPv4", + "scope": "global" } } - ], - [ - { - "op": "add", - "path": "/INTERFACE/Ethernet8|10.0.0.1~130/scope", - "value": "global" - } ] ] }, @@ -282,39 +276,15 @@ "op": "add", "path": "/ACL_TABLE/EVERFLOWV6", "value": { + "policy_desc": "EVERFLOWV6", + "ports": [ + "Ethernet4", + "Ethernet8" + ], + "stage": "ingress", "type": "MIRRORV6" } } - ], - [ - { - "op": "add", - "path": "/ACL_TABLE/EVERFLOWV6/policy_desc", - "value": "EVERFLOWV6" - } - ], - [ - { - "op": "add", - "path": "/ACL_TABLE/EVERFLOWV6/ports", - "value": [ - "Ethernet4" - ] - } - ], - [ - { - "op": "add", - "path": "/ACL_TABLE/EVERFLOWV6/ports/1", - "value": "Ethernet8" - } - ], - [ - { - "op": "add", - "path": "/ACL_TABLE/EVERFLOWV6/stage", - "value": "ingress" - } ] ] }, @@ -419,129 +389,57 @@ "path": "/ACL_TABLE", "value": { "NO-NSW-PACL-V4": { - "type": "L3" + "type": "L3", + "policy_desc": "NO-NSW-PACL-V4", + "ports": [ + "Ethernet0" + ] } } } ], - [ - { - "op": "add", - "path": "/ACL_TABLE/NO-NSW-PACL-V4/policy_desc", - "value": "NO-NSW-PACL-V4" - } - ], - [ - { - "op": "add", - "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports", - "value": [ - "Ethernet0" - ] - } - ], [ { "op": "add", 
"path": "/ACL_TABLE/DATAACL", "value": { + "policy_desc": "DATAACL", + "ports": [ + "Ethernet4" + ], + "stage": "ingress", "type": "L3" } } ], - [ - { - "op": "add", - "path": "/ACL_TABLE/DATAACL/policy_desc", - "value": "DATAACL" - } - ], - [ - { - "op": "add", - "path": "/ACL_TABLE/DATAACL/ports", - "value": [ - "Ethernet4" - ] - } - ], - [ - { - "op": "add", - "path": "/ACL_TABLE/DATAACL/stage", - "value": "ingress" - } - ], [ { "op": "add", "path": "/ACL_TABLE/EVERFLOW", "value": { + "policy_desc": "EVERFLOW", + "ports": [ + "Ethernet8" + ], + "stage": "ingress", "type": "MIRROR" } } ], - [ - { - "op": "add", - "path": "/ACL_TABLE/EVERFLOW/policy_desc", - "value": "EVERFLOW" - } - ], - [ - { - "op": "add", - "path": "/ACL_TABLE/EVERFLOW/ports", - "value": [ - "Ethernet8" - ] - } - ], - [ - { - "op": "add", - "path": "/ACL_TABLE/EVERFLOW/stage", - "value": "ingress" - } - ], [ { "op": "add", "path": "/ACL_TABLE/EVERFLOWV6", "value": { + "policy_desc": "EVERFLOWV6", + "ports": [ + "Ethernet4", + "Ethernet8" + ], + "stage": "ingress", "type": "MIRRORV6" } } - ], - [ - { - "op": "add", - "path": "/ACL_TABLE/EVERFLOWV6/policy_desc", - "value": "EVERFLOWV6" - } - ], - [ - { - "op": "add", - "path": "/ACL_TABLE/EVERFLOWV6/ports", - "value": [ - "Ethernet4" - ] - } - ], - [ - { - "op": "add", - "path": "/ACL_TABLE/EVERFLOWV6/ports/1", - "value": "Ethernet8" - } - ], - [ - { - "op": "add", - "path": "/ACL_TABLE/EVERFLOWV6/stage", - "value": "ingress" - } ] ] }, @@ -979,30 +877,34 @@ [ { "op": "add", - "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports", - "value": [ - "Ethernet0" - ] + "path": "/PORT/Ethernet3", + "value": { + "alias": "Eth1/4", + "lanes": "68", + "description": "", + "speed": "10000" + } } ], [ { "op": "add", - "path": "/VLAN_MEMBER", + "path": "/PORT/Ethernet2", "value": { - "Vlan100|Ethernet0": { - "tagging_mode": "untagged" - } + "alias": "Eth1/3", + "lanes": "67", + "description": "", + "speed": "10000" } } ], [ { "op": "add", - "path": "/PORT/Ethernet3", + "path": "/PORT/Ethernet1", "value": { - "alias": "Eth1/4", - "lanes": "68", + "alias": "Eth1/2", + "lanes": "66", "description": "", "speed": "10000" } @@ -1011,14 +913,18 @@ [ { "op": "add", - "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/1", - "value": "Ethernet3" + "path": "/VLAN_MEMBER", + "value": { + "Vlan100|Ethernet0": { + "tagging_mode": "untagged" + } + } } ], [ { "op": "add", - "path": "/VLAN_MEMBER/Vlan100|Ethernet3", + "path": "/VLAN_MEMBER/Vlan100|Ethernet1", "value": { "tagging_mode": "untagged" } @@ -1027,22 +933,12 @@ [ { "op": "add", - "path": "/PORT/Ethernet2", + "path": "/VLAN_MEMBER/Vlan100|Ethernet3", "value": { - "alias": "Eth1/3", - "lanes": "67", - "description": "", - "speed": "10000" + "tagging_mode": "untagged" } } ], - [ - { - "op": "add", - "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/2", - "value": "Ethernet2" - } - ], [ { "op": "add", @@ -1055,13 +951,10 @@ [ { "op": "add", - "path": "/PORT/Ethernet1", - "value": { - "alias": "Eth1/2", - "lanes": "66", - "description": "", - "speed": "10000" - } + "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports", + "value": [ + "Ethernet0" + ] } ], [ @@ -1073,23 +966,16 @@ ], [ { - "op": "replace", - "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports", - "value": [ - "Ethernet0", - "Ethernet1", - "Ethernet2", - "Ethernet3" - ] + "op": "add", + "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/2", + "value": "Ethernet2" } ], [ { "op": "add", - "path": "/VLAN_MEMBER/Vlan100|Ethernet1", - "value": { - "tagging_mode": "untagged" - } + "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/3", + "value": "Ethernet3" } 
] ] @@ -1220,6 +1106,24 @@ } ], "expected_changes": [ + [ + { + "op": "remove", + "path": "/VLAN_MEMBER/Vlan100|Ethernet1" + } + ], + [ + { + "op": "remove", + "path": "/VLAN_MEMBER/Vlan100|Ethernet2" + } + ], + [ + { + "op": "remove", + "path": "/VLAN_MEMBER/Vlan100|Ethernet3" + } + ], [ { "op": "replace", @@ -1283,24 +1187,6 @@ "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/1" } ], - [ - { - "op": "remove", - "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/1" - } - ], - [ - { - "op": "remove", - "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/1" - } - ], - [ - { - "op": "remove", - "path": "/VLAN_MEMBER/Vlan100|Ethernet1" - } - ], [ { "op": "remove", @@ -1310,7 +1196,7 @@ [ { "op": "remove", - "path": "/VLAN_MEMBER/Vlan100|Ethernet2" + "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/1" } ], [ @@ -1322,7 +1208,7 @@ [ { "op": "remove", - "path": "/VLAN_MEMBER/Vlan100|Ethernet3" + "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/1" } ], [ @@ -1358,6 +1244,17 @@ "path": "/ACL_TABLE" } ], + [ + { + "op": "add", + "path": "/VLAN_MEMBER", + "value": { + "Vlan100|Ethernet0": { + "tagging_mode": "untagged" + } + } + } + ], [ { "op": "add", @@ -1369,17 +1266,31 @@ } } ], + [ + { + "op": "add", + "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports", + "value": [ + "Ethernet0" + ] + } + ], [ { "op": "remove", - "path": "/PORT" + "path": "/VLAN_MEMBER" } ], [ { - "op": "add", - "path": "/ACL_TABLE/NO-NSW-PACL-V4/policy_desc", - "value": "NO-NSW-PACL-V4" + "op": "remove", + "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports" + } + ], + [ + { + "op": "remove", + "path": "/PORT" } ], [ @@ -1389,22 +1300,13 @@ "value": { "Ethernet0": { "alias": "Eth1", - "lanes": "65, 66, 67, 68", "description": "Ethernet0 100G link", + "lanes": "65, 66, 67, 68", "speed": "100000" } } } ], - [ - { - "op": "add", - "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports", - "value": [ - "Ethernet0" - ] - } - ], [ { "op": "add", @@ -1415,6 +1317,22 @@ } } } + ], + [ + { + "op": "add", + "path": "/ACL_TABLE/NO-NSW-PACL-V4/policy_desc", + "value": "NO-NSW-PACL-V4" + } + ], + [ + { + "op": "add", + "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports", + "value": [ + "Ethernet0" + ] + } ] ] }, @@ -2085,18 +2003,6 @@ } ], "expected_changes": [ - [ - { - "op": "remove", - "path": "/PORT/Ethernet0/alias" - } - ], - [ - { - "op": "remove", - "path": "/PORT/Ethernet0/description" - } - ], [ { "op": "remove", @@ -2154,18 +2060,6 @@ } ], "expected_changes": [ - [ - { - "op": "remove", - "path": "/INTERFACE/Ethernet8|10.0.0.1~130/family" - } - ], - [ - { - "op": "remove", - "path": "/INTERFACE/Ethernet8|10.0.0.1~130/scope" - } - ], [ { "op": "remove", @@ -2378,90 +2272,24 @@ } ], "expected_changes": [ - [ - { - "op": "remove", - "path": "/ACL_TABLE/NO-NSW-PACL-V4/policy_desc" - } - ], - [ - { - "op": "remove", - "path": "/ACL_TABLE/DATAACL/policy_desc" - } - ], - [ - { - "op": "remove", - "path": "/ACL_TABLE/DATAACL/stage" - } - ], - [ - { - "op": "remove", - "path": "/ACL_TABLE/EVERFLOW/policy_desc" - } - ], - [ - { - "op": "remove", - "path": "/ACL_TABLE/EVERFLOW/stage" - } - ], - [ - { - "op": "remove", - "path": "/ACL_TABLE/EVERFLOWV6/policy_desc" - } - ], - [ - { - "op": "remove", - "path": "/ACL_TABLE/EVERFLOWV6/ports/0" - } - ], - [ - { - "op": "remove", - "path": "/ACL_TABLE/EVERFLOWV6/stage" - } - ], [ { "op": "remove", "path": "/ACL_TABLE/NO-NSW-PACL-V4" } ], - [ - { - "op": "remove", - "path": "/ACL_TABLE/DATAACL/ports" - } - ], [ { "op": "remove", "path": "/ACL_TABLE/DATAACL" } ], - [ - { - "op": "remove", - "path": "/ACL_TABLE/EVERFLOW/ports" - } - ], [ { "op": "remove", "path": "/ACL_TABLE/EVERFLOW" } 
], - [ - { - "op": "remove", - "path": "/ACL_TABLE/EVERFLOWV6/ports" - } - ], [ { "op": "remove", @@ -2752,6 +2580,17 @@ "path": "/ACL_TABLE" } ], + [ + { + "op": "add", + "path": "/VLAN_MEMBER", + "value": { + "Vlan100|Ethernet0": { + "tagging_mode": "untagged" + } + } + } + ], [ { "op": "add", @@ -2763,17 +2602,31 @@ } } ], + [ + { + "op": "add", + "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports", + "value": [ + "Ethernet0" + ] + } + ], [ { "op": "remove", - "path": "/PORT" + "path": "/VLAN_MEMBER" } ], [ { - "op": "add", - "path": "/ACL_TABLE/NO-NSW-PACL-V4/policy_desc", - "value": "NO-NSW-PACL-V4" + "op": "remove", + "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports" + } + ], + [ + { + "op": "remove", + "path": "/PORT" } ], [ @@ -2783,22 +2636,13 @@ "value": { "Ethernet0": { "alias": "Eth1", - "lanes": "67", "description": "Ethernet0 100G link", + "lanes": "67", "speed": "100000" } } } ], - [ - { - "op": "add", - "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports", - "value": [ - "Ethernet0" - ] - } - ], [ { "op": "add", @@ -2809,6 +2653,22 @@ } } } + ], + [ + { + "op": "add", + "path": "/ACL_TABLE/NO-NSW-PACL-V4/policy_desc", + "value": "NO-NSW-PACL-V4" + } + ], + [ + { + "op": "add", + "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports", + "value": [ + "Ethernet0" + ] + } ] ] }, @@ -4267,6 +4127,127 @@ } ], "expected_changes": [ + [ + { + "op": "add", + "path": "/BGP_NEIGHBOR/10.0.0.33", + "value": { + "admin_status": "up", + "asn": "64001", + "holdtime": "10", + "keepalive": "3", + "local_addr": "10.0.0.32", + "name": "ARISTA01T0", + "nhopself": "0", + "rrclient": "0" + } + } + ], + [ + { + "op": "add", + "path": "/BGP_NEIGHBOR/fc00::42", + "value": { + "admin_status": "up", + "asn": "64001", + "holdtime": "10", + "keepalive": "3", + "local_addr": "fc00::41", + "name": "ARISTA01T0", + "nhopself": "0", + "rrclient": "0" + } + } + ], + [ + { + "op": "add", + "path": "/BUFFER_PG/Ethernet64|3-4", + "value": { + "profile": "pg_lossless_40000_40m_profile" + } + } + ], + [ + { + "op": "add", + "path": "/BUFFER_PG/Ethernet64|0", + "value": { + "profile": "ingress_lossy_profile" + } + } + ], + [ + { + "op": "add", + "path": "/BUFFER_QUEUE/Ethernet64|0-2", + "value": { + "profile": "egress_lossy_profile" + } + } + ], + [ + { + "op": "add", + "path": "/BUFFER_QUEUE/Ethernet64|3-4", + "value": { + "profile": "egress_lossless_profile" + } + } + ], + [ + { + "op": "add", + "path": "/BUFFER_QUEUE/Ethernet64|5-6", + "value": { + "profile": "egress_lossy_profile" + } + } + ], + [ + { + "op": "add", + "path": "/DEVICE_NEIGHBOR/Ethernet64", + "value": { + "name": "ARISTA01T0", + "port": "Ethernet1" + } + } + ], + [ + { + "op": "add", + "path": "/INTERFACE/Ethernet64", + "value": {} + } + ], + [ + { + "op": "add", + "path": "/INTERFACE/Ethernet64|10.0.0.32~131", + "value": {} + } + ], + [ + { + "op": "add", + "path": "/INTERFACE/Ethernet64|FC00::41~1126", + "value": {} + } + ], + [ + { + "op": "add", + "path": "/PORT_QOS_MAP/Ethernet64", + "value": { + "dscp_to_tc_map": "AZURE", + "pfc_enable": "3,4", + "pfc_to_queue_map": "AZURE", + "tc_to_pg_map": "AZURE", + "tc_to_queue_map": "AZURE" + } + } + ], [ { "op": "add", @@ -4603,88 +4584,6 @@ "value": "up" } ], - [ - { - "op": "add", - "path": "/BUFFER_PG/Ethernet64|3-4", - "value": { - "profile": "pg_lossless_40000_40m_profile" - } - } - ], - [ - { - "op": "add", - "path": "/BUFFER_PG/Ethernet64|0", - "value": { - "profile": "ingress_lossy_profile" - } - } - ], - [ - { - "op": "add", - "path": "/BUFFER_QUEUE/Ethernet64|0-2", - "value": { - "profile": "egress_lossy_profile" - } - } - ], - [ 
- { - "op": "add", - "path": "/BUFFER_QUEUE/Ethernet64|3-4", - "value": { - "profile": "egress_lossless_profile" - } - } - ], - [ - { - "op": "add", - "path": "/BUFFER_QUEUE/Ethernet64|5-6", - "value": { - "profile": "egress_lossy_profile" - } - } - ], - [ - { - "op": "add", - "path": "/DEVICE_NEIGHBOR/Ethernet64", - "value": { - "name": "ARISTA01T0" - } - } - ], - [ - { - "op": "add", - "path": "/DEVICE_NEIGHBOR/Ethernet64/port", - "value": "Ethernet1" - } - ], - [ - { - "op": "add", - "path": "/INTERFACE/Ethernet64", - "value": {} - } - ], - [ - { - "op": "add", - "path": "/INTERFACE/Ethernet64|10.0.0.32~131", - "value": {} - } - ], - [ - { - "op": "add", - "path": "/INTERFACE/Ethernet64|FC00::41~1126", - "value": {} - } - ], [ { "op": "replace", @@ -4692,81 +4591,12 @@ "value": "ARISTA01T0:Ethernet1" } ], - [ - { - "op": "add", - "path": "/PORT_QOS_MAP/Ethernet64", - "value": { - "dscp_to_tc_map": "AZURE" - } - } - ], - [ - { - "op": "add", - "path": "/PORT_QOS_MAP/Ethernet64/pfc_enable", - "value": "3,4" - } - ], - [ - { - "op": "add", - "path": "/PORT_QOS_MAP/Ethernet64/pfc_to_queue_map", - "value": "AZURE" - } - ], - [ - { - "op": "add", - "path": "/PORT_QOS_MAP/Ethernet64/tc_to_pg_map", - "value": "AZURE" - } - ], - [ - { - "op": "add", - "path": "/PORT_QOS_MAP/Ethernet64/tc_to_queue_map", - "value": "AZURE" - } - ], [ { "op": "add", "path": "/PORT/Ethernet64/admin_status", "value": "up" } - ], - [ - { - "op": "add", - "path": "/BGP_NEIGHBOR/10.0.0.33", - "value": { - "admin_status": "up", - "asn": "64001", - "holdtime": "10", - "keepalive": "3", - "local_addr": "10.0.0.32", - "name": "ARISTA01T0", - "nhopself": "0", - "rrclient": "0" - } - } - ], - [ - { - "op": "add", - "path": "/BGP_NEIGHBOR/fc00::42", - "value": { - "admin_status": "up", - "asn": "64001", - "holdtime": "10", - "keepalive": "3", - "local_addr": "fc00::41", - "name": "ARISTA01T0", - "nhopself": "0", - "rrclient": "0" - } - } ] ] } diff --git a/tests/generic_config_updater/patch_sorter_test.py b/tests/generic_config_updater/patch_sorter_test.py index 12d76c5283..5530de7cef 100644 --- a/tests/generic_config_updater/patch_sorter_test.py +++ b/tests/generic_config_updater/patch_sorter_test.py @@ -498,21 +498,23 @@ def setUp(self): def test_ctor__assigns_values_correctly(self): # Arrange move_generators = Mock() + move_non_extendable_generators = Mock() move_extenders = Mock() move_validators = Mock() # Act - move_wrapper = ps.MoveWrapper(move_generators, move_extenders, move_validators) + move_wrapper = ps.MoveWrapper(move_generators, move_non_extendable_generators, move_extenders, move_validators) # Assert self.assertIs(move_generators, move_wrapper.move_generators) + self.assertIs(move_non_extendable_generators, move_wrapper.move_non_extendable_generators) self.assertIs(move_extenders, move_wrapper.move_extenders) self.assertIs(move_validators, move_wrapper.move_validators) def test_generate__single_move_generator__single_move_returned(self): # Arrange move_generators = [self.single_move_generator] - move_wrapper = ps.MoveWrapper(move_generators, [], []) + move_wrapper = ps.MoveWrapper(move_generators, [], [], []) expected = [self.any_move] # Act @@ -524,7 +526,7 @@ def test_generate__single_move_generator__single_move_returned(self): def test_generate__multiple_move_generator__multiple_move_returned(self): # Arrange move_generators = [self.multiple_move_generator] - move_wrapper = ps.MoveWrapper(move_generators, [], []) + move_wrapper = ps.MoveWrapper(move_generators, [], [], []) expected = [self.any_move, 
self.any_other_move1, self.any_other_move2] # Act @@ -536,7 +538,7 @@ def test_generate__multiple_move_generator__multiple_move_returned(self): def test_generate__different_move_generators__different_moves_returned(self): # Arrange move_generators = [self.single_move_generator, self.another_single_move_generator] - move_wrapper = ps.MoveWrapper(move_generators, [], []) + move_wrapper = ps.MoveWrapper(move_generators, [], [], []) expected = [self.any_move, self.any_other_move1] # Act @@ -548,7 +550,44 @@ def test_generate__different_move_generators__different_moves_returned(self): def test_generate__duplicate_generated_moves__unique_moves_returned(self): # Arrange move_generators = [self.single_move_generator, self.single_move_generator] - move_wrapper = ps.MoveWrapper(move_generators, [], []) + move_wrapper = ps.MoveWrapper(move_generators, [], [], []) + expected = [self.any_move] + + # Act + actual = list(move_wrapper.generate(self.any_diff)) + + # Assert + self.assertListEqual(expected, actual) + + def test_generate__different_move_non_extendable_generators__different_moves_returned(self): + # Arrange + move_non_extendable_generators = [self.single_move_generator, self.another_single_move_generator] + move_wrapper = ps.MoveWrapper([], move_non_extendable_generators, [], []) + expected = [self.any_move, self.any_other_move1] + + # Act + actual = list(move_wrapper.generate(self.any_diff)) + + # Assert + self.assertListEqual(expected, actual) + + def test_generate__duplicate_generated_non_extendable_moves__unique_moves_returned(self): + # Arrange + move_non_extendable_generators = [self.single_move_generator, self.single_move_generator] + move_wrapper = ps.MoveWrapper([], move_non_extendable_generators, [], []) + expected = [self.any_move] + + # Act + actual = list(move_wrapper.generate(self.any_diff)) + + # Assert + self.assertListEqual(expected, actual) + + def test_generate__duplicate_move_between_extendable_and_non_extendable_generators__unique_move_returned(self): + # Arrange + move_generators = [self.single_move_generator] + move_non_extendable_generators = [self.single_move_generator] + move_wrapper = ps.MoveWrapper(move_generators, move_non_extendable_generators, [], []) expected = [self.any_move] # Act @@ -561,7 +600,7 @@ def test_generate__single_move_extender__one_extended_move_returned(self): # Arrange move_generators = [self.single_move_generator] move_extenders = [self.single_move_extender] - move_wrapper = ps.MoveWrapper(move_generators, move_extenders, []) + move_wrapper = ps.MoveWrapper(move_generators, [], move_extenders, []) expected = [self.any_move, self.any_extended_move] # Act @@ -574,7 +613,7 @@ def test_generate__multiple_move_extender__multiple_extended_move_returned(self) # Arrange move_generators = [self.single_move_generator] move_extenders = [self.multiple_move_extender] - move_wrapper = ps.MoveWrapper(move_generators, move_extenders, []) + move_wrapper = ps.MoveWrapper(move_generators, [], move_extenders, []) expected = [self.any_move, self.any_extended_move, self.any_other_extended_move1, self.any_other_extended_move2] # Act @@ -587,7 +626,7 @@ def test_generate__different_move_extenders__different_extended_moves_returned(s # Arrange move_generators = [self.single_move_generator] move_extenders = [self.single_move_extender, self.another_single_move_extender] - move_wrapper = ps.MoveWrapper(move_generators, move_extenders, []) + move_wrapper = ps.MoveWrapper(move_generators, [], move_extenders, []) expected = [self.any_move, self.any_extended_move, 
self.any_other_extended_move1] # Act @@ -600,7 +639,7 @@ def test_generate__duplicate_extended_moves__unique_moves_returned(self): # Arrange move_generators = [self.single_move_generator] move_extenders = [self.single_move_extender, self.single_move_extender] - move_wrapper = ps.MoveWrapper(move_generators, move_extenders, []) + move_wrapper = ps.MoveWrapper(move_generators, [], move_extenders, []) expected = [self.any_move, self.any_extended_move] # Act @@ -613,7 +652,7 @@ def test_generate__mixed_extended_moves__unique_moves_returned(self): # Arrange move_generators = [self.single_move_generator, self.another_single_move_generator] move_extenders = [self.mixed_move_extender] - move_wrapper = ps.MoveWrapper(move_generators, move_extenders, []) + move_wrapper = ps.MoveWrapper(move_generators, [], move_extenders, []) expected = [self.any_move, self.any_other_move1, self.any_extended_move, @@ -626,10 +665,54 @@ def test_generate__mixed_extended_moves__unique_moves_returned(self): # Assert self.assertListEqual(expected, actual) + def test_generate__multiple_non_extendable_moves__no_moves_extended(self): + # Arrange + move_non_extendable_generators = [self.single_move_generator, self.another_single_move_generator] + move_extenders = [self.mixed_move_extender] + move_wrapper = ps.MoveWrapper([], move_non_extendable_generators, move_extenders, []) + expected = [self.any_move, self.any_other_move1] + + # Act + actual = list(move_wrapper.generate(self.any_diff)) + + # Assert + self.assertListEqual(expected, actual) + + def test_generate__mixed_extendable_non_extendable_moves__only_extendable_moves_extended(self): + # Arrange + move_generators = [self.another_single_move_generator] # generates: any_other_move1, extends: any_other_extended_move1 + move_non_extendable_generators = [self.single_move_generator] # generates: any_move + move_extenders = [self.mixed_move_extender] + move_wrapper = ps.MoveWrapper(move_generators, move_non_extendable_generators, move_extenders, []) + expected = [self.any_move, + self.any_other_move1, + self.any_other_extended_move1] + + # Act + actual = list(move_wrapper.generate(self.any_diff)) + + # Assert + self.assertListEqual(expected, actual) + + def test_generate__move_is_extendable_and_non_extendable__move_is_extended(self): + # Arrange + move_generators = [self.single_move_generator] + move_non_extendable_generators = [self.single_move_generator] + move_extenders = [self.single_move_extender] + move_wrapper = ps.MoveWrapper(move_generators, move_non_extendable_generators, move_extenders, []) + expected = [self.any_move, + self.any_extended_move] + + # Act + actual = list(move_wrapper.generate(self.any_diff)) + + # Assert + self.assertListEqual(expected, actual) + def test_validate__validation_fail__false_returned(self): # Arrange move_validators = [self.fail_move_validator] - move_wrapper = ps.MoveWrapper([], [], move_validators) + move_wrapper = ps.MoveWrapper([], [], [], move_validators) # Act and assert self.assertFalse(move_wrapper.validate(self.any_move, self.any_diff)) @@ -637,7 +720,7 @@ def test_validate__validation_fail__false_returned(self): def test_validate__validation_succeed__true_returned(self): # Arrange move_validators = [self.success_move_validator] - move_wrapper = ps.MoveWrapper([], [], move_validators) + move_wrapper = ps.MoveWrapper([], [], [], move_validators) # Act and assert self.assertTrue(move_wrapper.validate(self.any_move, self.any_diff)) @@ -645,7 +728,7 @@ def test_validate__validation_succeed__true_returned(self): def 
test_validate__multiple_validators_last_fail___false_returned(self): # Arrange move_validators = [self.success_move_validator, self.success_move_validator, self.fail_move_validator] - move_wrapper = ps.MoveWrapper([], [], move_validators) + move_wrapper = ps.MoveWrapper([], [], [], move_validators) # Act and assert self.assertFalse(move_wrapper.validate(self.any_move, self.any_diff)) @@ -653,7 +736,7 @@ def test_validate__multiple_validators_last_fail___false_returned(self): def test_validate__multiple_validators_succeed___true_returned(self): # Arrange move_validators = [self.success_move_validator, self.success_move_validator, self.success_move_validator] - move_wrapper = ps.MoveWrapper([], [], move_validators) + move_wrapper = ps.MoveWrapper([], [], [], move_validators) # Act and assert self.assertTrue(move_wrapper.validate(self.any_move, self.any_diff)) @@ -662,7 +745,7 @@ def test_simulate__applies_move(self): # Arrange diff = Mock() diff.apply_move.side_effect = create_side_effect_dict({(str(self.any_move), ): self.any_diff}) - move_wrapper = ps.MoveWrapper(None, None, None) + move_wrapper = ps.MoveWrapper(None, None, None, None) # Act actual = move_wrapper.simulate(self.any_move, diff) @@ -1862,6 +1945,130 @@ def _get_no_critical_port_change_test_cases(self): def _apply_operations(self, config, operations): return jsonpatch.JsonPatch(operations).apply(config) +class TestTableLevelMoveGenerator(unittest.TestCase): + def setUp(self): + self.generator = ps.TableLevelMoveGenerator() + + def test_generate__tables_in_current_but_not_target__tables_deleted_moves(self): + self.verify(current = {"ExistingTable": {}, "NonExistingTable1": {}, "NonExistingTable2": {}}, + target = {"ExistingTable": {}}, + ex_ops = [{"op": "remove", 'path': '/NonExistingTable1'}, + {"op": "remove", 'path': '/NonExistingTable2'}]) + + def test_generate__tables_in_target_but_not_current__tables_added_moves(self): + self.verify(current = {"ExistingTable": {}}, + target = {"ExistingTable": {}, "NonExistingTable1": {}, "NonExistingTable2": {}}, + ex_ops = [{"op": "add", 'path': '/NonExistingTable1', 'value': {}}, + {"op": "add", 'path': '/NonExistingTable2', 'value': {}}]) + + def test_generate__all_tables_exist__no_moves(self): + self.verify(current = {"ExistingTable1": { "Key1": "Value1" }, "ExistingTable2": {}}, + target = {"ExistingTable1": {}, "ExistingTable2": { "Key2": "Value2" }}, + ex_ops = []) + + def test_generate__multiple_cases__deletion_precedes_addition(self): + self.verify(current = {"CommonTable": { "Key1": "Value1" }, "CurrentTable": {}}, + target = {"CommonTable": {}, "TargetTable": { "Key2": "Value2" }}, + ex_ops = [{"op": "remove", 'path': '/CurrentTable'}, + {"op": "add", 'path': '/TargetTable', 'value': { "Key2": "Value2" }}]) + + def verify(self, current, target, ex_ops): + # Arrange + diff = ps.Diff(current, target) + + # Act + moves = self.generator.generate(diff) + + # Assert + self.verify_moves(ex_ops, + moves) + + def verify_moves(self, ops, moves): + moves_ops = [list(move.patch)[0] for move in moves] + self.assertCountEqual(ops, moves_ops) + +class TestKeyLevelMoveGenerator(unittest.TestCase): + def setUp(self): + self.generator = ps.KeyLevelMoveGenerator() + + def test_generate__keys_in_current_but_not_target__keys_deleted_moves(self): + self.verify(current = { + "ExistingTable1": { + "ExistingKey11": "Value11", + "NonExistingKey12" : "Value12", + "NonExistingKey13" : "Value13" + }, + "NonExistingTable2": + { + "NonExistingKey21" : "Value21", + "NonExistingKey22" : "Value22" + } + }, + 
target = { + "ExistingTable1": { + "ExistingKey11": "Value11" + } + }, + ex_ops = [{"op": "remove", 'path': '/ExistingTable1/NonExistingKey12'}, + {"op": "remove", 'path': '/ExistingTable1/NonExistingKey13'}, + {"op": "remove", 'path': '/NonExistingTable2/NonExistingKey21'}, + {"op": "remove", 'path': '/NonExistingTable2/NonExistingKey22'}]) + + + def test_generate__single_key_in_current_but_not_target__whole_table_deleted(self): + self.verify(current = { "ExistingTable1": { "NonExistingKey11" : "Value11" }}, + target = {}, + ex_ops = [{"op": "remove", 'path': '/ExistingTable1'}]) + + def test_generate__keys_in_target_but_not_current__keys_added_moves(self): + self.verify(current = { + "ExistingTable1": { + "ExistingKey11": "Value11" + } + }, + target = { + "ExistingTable1": { + "ExistingKey11": "Value11", + "NonExistingKey12" : "Value12", + "NonExistingKey13" : "Value13" + }, + "NonExistingTable2": + { + "NonExistingKey21" : "Value21", + "NonExistingKey22" : "Value22" + } + }, + ex_ops = [{"op": "add", 'path': '/ExistingTable1/NonExistingKey12', "value": "Value12"}, + {"op": "add", 'path': '/ExistingTable1/NonExistingKey13', "value": "Value13"}, + {"op": "add", 'path': '/NonExistingTable2', "value": { "NonExistingKey21": "Value21" }}, + {"op": "add", 'path': '/NonExistingTable2', "value": { "NonExistingKey22": "Value22" }}]) + + def test_generate__all_keys_exist__no_moves(self): + self.verify(current = {"ExistingTable1": { "Key1": "Value1Current" }, "ExistingTable2": { "Key2": "Value2" }}, + target = {"ExistingTable1": { "Key1": "Value1Target" }, "ExistingTable2": { "Key2": {} } }, + ex_ops = []) + + def test_generate__multiple_cases__deletion_precedes_addition(self): + self.verify(current = {"AnyTable": { "CommonKey": "CurrentValue1", "CurrentKey": "CurrentValue2" }}, + target = {"AnyTable": { "CommonKey": "TargetValue1", "TargetKey": "TargetValue2" }}, + ex_ops = [{"op": "remove", 'path': '/AnyTable/CurrentKey'}, + {"op": "add", 'path': '/AnyTable/TargetKey', 'value': "TargetValue2"}]) + + def verify(self, current, target, ex_ops): + # Arrange + diff = ps.Diff(current, target) + + # Act + moves = self.generator.generate(diff) + + # Assert + self.verify_moves(ex_ops, + moves) + + def verify_moves(self, ops, moves): + moves_ops = [list(move.patch)[0] for move in moves] + self.assertCountEqual(ops, moves_ops) + class TestLowLevelMoveGenerator(unittest.TestCase): def setUp(self): path_addressing = PathAddressing() @@ -2823,6 +3030,7 @@ def verify(self, algo, algo_class): config_wrapper = ConfigWrapper() factory = ps.SortAlgorithmFactory(OperationWrapper(), config_wrapper, PathAddressing(config_wrapper)) expected_generators = [ps.LowLevelMoveGenerator] + expected_non_extendable_generators = [ps.KeyLevelMoveGenerator] expected_extenders = [ps.RequiredValueMoveExtender, ps.UpperLevelMoveExtender, ps.DeleteInsteadOfReplaceMoveExtender, @@ -2838,12 +3046,14 @@ def verify(self, algo, algo_class): # Act sorter = factory.create(algo) actual_generators = [type(item) for item in sorter.move_wrapper.move_generators] + actual_non_extendable_generators = [type(item) for item in sorter.move_wrapper.move_non_extendable_generators] actual_extenders = [type(item) for item in sorter.move_wrapper.move_extenders] actual_validators = [type(item) for item in sorter.move_wrapper.move_validators] # Assert self.assertIsInstance(sorter, algo_class) self.assertCountEqual(expected_generators, actual_generators) + self.assertCountEqual(expected_non_extendable_generators, actual_non_extendable_generators) 
self.assertCountEqual(expected_extenders, actual_extenders) self.assertCountEqual(expected_validator, actual_validators) From 51d3550d9192e4b751b96d4017cc79b0c91150e8 Mon Sep 17 00:00:00 2001 From: Stephen Sun <5379172+stephenxs@users.noreply.github.com> Date: Sat, 2 Apr 2022 14:58:03 +0800 Subject: [PATCH 06/11] Fix issues in clear_qos (#2122) - What I did Fix issues in clear_qos 1. Remove tables BUFFER_PORT_INGRESS_PROFILE_LIST and BUFFER_PORT_EGRESS_PROFILE_LIST When clear_qos is executed, all QoS- and buffer-related tables should be removed. Currently, both tables are missed. This is to fix it. 2. Adjust the order in which the buffer and QoS tables are removed Remove the tables whose entries depend on other tables first and then the tables whose entries are referenced by other tables. Any object can be removed from SAI only if there is no object referencing it. There are retrying and dependency-tracking mechanisms to guarantee it in orchagent. If CLI operates the table in an order where the referencing tables removed first before referenced tables, it is possible to reduce the probability to retry theoretically. The order does not guarantee it but adjusting the order is almost zero-cost so we would like to do it. - How I did it - How to verify it Signed-off-by: Stephen Sun stephens@nvidia.com --- config/main.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/config/main.py b/config/main.py index 1632ffd114..ed38bcc841 100644 --- a/config/main.py +++ b/config/main.py @@ -635,6 +635,8 @@ def _clear_cbf(): def _clear_qos(): QOS_TABLE_NAMES = [ + 'PORT_QOS_MAP', + 'QUEUE', 'TC_TO_PRIORITY_GROUP_MAP', 'MAP_PFC_PRIORITY_TO_QUEUE', 'TC_TO_QUEUE_MAP', @@ -642,14 +644,14 @@ def _clear_qos(): 'MPLS_TC_TO_TC_MAP', 'SCHEDULER', 'PFC_PRIORITY_TO_PRIORITY_GROUP_MAP', - 'PORT_QOS_MAP', 'WRED_PROFILE', - 'QUEUE', 'CABLE_LENGTH', - 'BUFFER_POOL', - 'BUFFER_PROFILE', 'BUFFER_PG', 'BUFFER_QUEUE', + 'BUFFER_PORT_INGRESS_PROFILE_LIST', + 'BUFFER_PORT_EGRESS_PROFILE_LIST', + 'BUFFER_PROFILE', + 'BUFFER_POOL', 'DEFAULT_LOSSLESS_BUFFER_PARAMETER', 'LOSSLESS_TRAFFIC_PATTERN'] From f70dc27827a88d70e91e15ecdcde2ebbc446116d Mon Sep 17 00:00:00 2001 From: Vivek R Date: Sun, 3 Apr 2022 03:16:17 -0700 Subject: [PATCH 07/11] [techsupport] Handle minor fixes of TS Lock and update auto-TS (#2114) 1. Print the last statement as the techsupport dump name, as some automation processes might depend of parsing the last line to infer the dump path. Previously: handle_exit Removing lock. Exit: 0 removed '/tmp/techsupport-lock/PID' removed directory '/tmp/techsupport-lock' Updated: handle_exit Removing lock. Exit: 0 removed '/tmp/techsupport-lock/PID' removed directory '/tmp/techsupport-lock' /var/dump/sonic_dump_r-bulldog-03_20220324_195553.tar.gz 2. Don't acquire the lock when running in NOOP mode 3. Set the set -v option just before running main so that it won't print the generate_dump code to stdout 4. Update the auto-techsupport script to handle EXT_RETRY and EXT_LOCKFAIL exit codes returned by show techsupport command. 5. 
Update the minor error in since argument for auto-techsupport Signed-off-by: Vivek Keddy Karri --- scripts/coredump_gen_handler.py | 24 +++++++++----- scripts/generate_dump | 12 ++++--- tests/coredump_gen_handler_test.py | 36 +++++++++++++++++++-- utilities_common/auto_techsupport_helper.py | 10 ++++-- 4 files changed, 66 insertions(+), 16 deletions(-) diff --git a/scripts/coredump_gen_handler.py b/scripts/coredump_gen_handler.py index 33d661e3da..03ba2de89e 100644 --- a/scripts/coredump_gen_handler.py +++ b/scripts/coredump_gen_handler.py @@ -110,18 +110,26 @@ def parse_ts_dump_name(self, ts_stdout): syslog.syslog(syslog.LOG_ERR, "stdout of the 'show techsupport' cmd doesn't have the dump name") return "" - def invoke_ts_cmd(self, since_cfg): - since_cfg = "'" + since_cfg + "'" + def invoke_ts_cmd(self, since_cfg, num_retry=0): cmd_opts = ["show", "techsupport", "--silent", "--since", since_cfg] cmd = " ".join(cmd_opts) rc, stdout, stderr = subprocess_exec(cmd_opts, env=ENV_VAR) - if rc: + new_dump = "" + if rc == EXT_LOCKFAIL: + syslog.syslog(syslog.LOG_NOTICE, "Another instance of techsupport running, aborting this. stderr: {}".format(stderr)) + elif rc == EXT_RETRY: + if num_retry <= MAX_RETRY_LIMIT: + return self.invoke_ts_cmd(since_cfg, num_retry+1) + else: + syslog.syslog(syslog.LOG_ERR, "MAX_RETRY_LIMIT for show techsupport invocation exceeded, stderr: {}".format(stderr)) + elif rc != EXT_SUCCESS: syslog.syslog(syslog.LOG_ERR, "show techsupport failed with exit code {}, stderr: {}".format(rc, stderr)) - new_dump = self.parse_ts_dump_name(stdout) - if not new_dump: - syslog.syslog(syslog.LOG_ERR, "{} was run, but no techsupport dump is found".format(cmd)) - else: - syslog.syslog(syslog.LOG_INFO, "{} is successful, {} is created".format(cmd, new_dump)) + else: # EXT_SUCCESS + new_dump = self.parse_ts_dump_name(stdout) # Parse the dump name + if not new_dump: + syslog.syslog(syslog.LOG_ERR, "{} was run, but no techsupport dump is found".format(cmd)) + else: + syslog.syslog(syslog.LOG_INFO, "{} is successful, {} is created".format(cmd, new_dump)) return new_dump def verify_rate_limit_intervals(self, global_cooloff, container_cooloff): diff --git a/scripts/generate_dump b/scripts/generate_dump index ef06e929e1..85a7f6a057 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -63,6 +63,10 @@ handle_exit() ECODE=$? echo "Removing lock. Exit: $ECODE" >&2 $RM $V -rf ${LOCKDIR} + # Echo the filename as the last statement if the generation succeeds + if [[ -f $TARFILE && ($ECODE == $EXT_SUCCESS || $ECODE == $RETURN_CODE) ]]; then + echo $TARFILE + fi } handle_signal() @@ -1360,8 +1364,6 @@ main() { # Invoke the TechSupport Cleanup Hook setsid python3 /usr/local/bin/techsupport_cleanup.py ${TARFILE} &> /tmp/techsupport_cleanup.log & - - echo ${TARFILE} if ! $SAVE_STDERR then @@ -1494,7 +1496,6 @@ while getopts ":xnvhzas:t:r:d" opt; do ;; v) # echo commands about to be run to stderr - set -v V="-v" ;; n) @@ -1547,7 +1548,7 @@ fi ## Attempt Locking ## -if mkdir "${LOCKDIR}" &>/dev/null; then +if $MKDIR "${LOCKDIR}" &>/dev/null; then trap 'handle_exit' EXIT echo "$$" > "${PIDFILE}" # This handler will exit the script upon receiving these interrupts @@ -1555,6 +1556,9 @@ if mkdir "${LOCKDIR}" &>/dev/null; then trap 'handle_signal' SIGINT SIGHUP SIGQUIT SIGTERM echo "Lock succesfully accquired and installed signal handlers" # Proceed with the actual code + if [[ ! 
-z "${V}" ]]; then + set -v + fi main else # lock failed, check if the other PID is alive diff --git a/tests/coredump_gen_handler_test.py b/tests/coredump_gen_handler_test.py index e5270460eb..bf4ae8dc78 100644 --- a/tests/coredump_gen_handler_test.py +++ b/tests/coredump_gen_handler_test.py @@ -3,10 +3,12 @@ import sys import pyfakefs import unittest +import signal from pyfakefs.fake_filesystem_unittest import Patcher from swsscommon import swsscommon from utilities_common.general import load_module_from_source from utilities_common.db import Db +from utilities_common.auto_techsupport_helper import EXT_RETRY from .mock_tables import dbconnector sys.path.append("scripts") @@ -18,6 +20,9 @@ /tmp/saisdkdump """ +def signal_handler(signum, frame): + raise Exception("Timed out!") + def set_auto_ts_cfg(redis_mock, state="disabled", rate_limit_interval="0", max_core_size="0.0", @@ -264,7 +269,7 @@ def test_since_argument(self): def mock_cmd(cmd, env): ts_dump = "/var/dump/sonic_dump_random3.tar.gz" cmd_str = " ".join(cmd) - if "--since '4 days ago'" in cmd_str: + if "--since 4 days ago" in cmd_str: patcher.fs.create_file(ts_dump) return 0, AUTO_TS_STDOUT + ts_dump, "" elif "date --date=4 days ago" in cmd_str: @@ -330,7 +335,7 @@ def test_invalid_since_argument(self): def mock_cmd(cmd, env): ts_dump = "/var/dump/sonic_dump_random3.tar.gz" cmd_str = " ".join(cmd) - if "--since '2 days ago'" in cmd_str: + if "--since 2 days ago" in cmd_str: patcher.fs.create_file(ts_dump) print(AUTO_TS_STDOUT + ts_dump) return 0, AUTO_TS_STDOUT + ts_dump, "" @@ -396,3 +401,30 @@ def mock_cmd(cmd, env): assert "orchagent.12345.123.core.gz" in current_fs assert "lldpmgrd.12345.22.core.gz" in current_fs assert "python3.12345.21.core.gz" in current_fs + + def test_max_retry_ts_failure(self): + """ + Scenario: TS subprocess is continously returning EXT_RETRY + Make sure auto-ts is not exceeding the limit + """ + db_wrap = Db() + redis_mock = db_wrap.db + set_auto_ts_cfg(redis_mock, state="enabled") + set_feature_table_cfg(redis_mock, state="enabled") + with Patcher() as patcher: + def mock_cmd(cmd, env): + return EXT_RETRY, "", "" + + cdump_mod.subprocess_exec = mock_cmd + patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") + cls = cdump_mod.CriticalProcCoreDumpHandle("orchagent.12345.123.core.gz", "swss", redis_mock) + + signal.signal(signal.SIGALRM, signal_handler) + signal.alarm(5) # 5 seconds + try: + cls.handle_core_dump_creation_event() + except Exception: + assert False, "Method should not time out" + finally: + signal.alarm(0) + diff --git a/utilities_common/auto_techsupport_helper.py b/utilities_common/auto_techsupport_helper.py index 0fba656521..4eaae933b0 100644 --- a/utilities_common/auto_techsupport_helper.py +++ b/utilities_common/auto_techsupport_helper.py @@ -11,8 +11,9 @@ "CORE_DUMP_DIR", "CORE_DUMP_PTRN", "TS_DIR", "TS_PTRN", "CFG_DB", "AUTO_TS", "CFG_STATE", "CFG_MAX_TS", "COOLOFF", "CFG_CORE_USAGE", "CFG_SINCE", "FEATURE", "STATE_DB", - "TS_MAP", "CORE_DUMP", "TIMESTAMP", "CONTAINER", - "TIME_BUF", "SINCE_DEFAULT", "TS_PTRN_GLOB" + "TS_MAP", "CORE_DUMP", "TIMESTAMP", "CONTAINER", "TIME_BUF", + "SINCE_DEFAULT", "TS_PTRN_GLOB", "EXT_LOCKFAIL", "EXT_RETRY", + "EXT_SUCCESS", "MAX_RETRY_LIMIT" ] + [ # Methods "verify_recent_file_creation", "get_ts_dumps", @@ -60,6 +61,11 @@ TIME_BUF = 20 SINCE_DEFAULT = "2 days ago" +# Techsupport Exit Codes +EXT_LOCKFAIL = 2 +EXT_RETRY = 4 +EXT_SUCCESS = 0 +MAX_RETRY_LIMIT = 2 # Helper methods def subprocess_exec(cmd, env=None): From 
6bd54d0411061b5339c7cd39093179bbef7dc87d Mon Sep 17 00:00:00 2001 From: James Denton Date: Sun, 3 Apr 2022 18:27:10 -0500 Subject: [PATCH 08/11] Fix 'show mac' output when FDB entry for default vlan is None instead of 1 (#2126) * Fix 'show mac' output when FDB entry for native vlan is NoneType Signed-off-by: James Denton --- scripts/fdbshow | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/scripts/fdbshow b/scripts/fdbshow index cbf8b40394..475a5dee7d 100755 --- a/scripts/fdbshow +++ b/scripts/fdbshow @@ -130,7 +130,9 @@ class FdbShow(object): except Exception: vlan_id = bvid print("Failed to get Vlan id for bvid {}\n".format(bvid)) - self.bridge_mac_list.append((int(vlan_id),) + (fdb["mac"],) + (if_name,) + (fdb_type,)) + + if vlan_id is not None: + self.bridge_mac_list.append((int(vlan_id),) + (fdb["mac"],) + (if_name,) + (fdb_type,)) self.bridge_mac_list.sort(key = lambda x: x[0]) return From 4010bd0945ff2a59d570118aac552b5bf0602172 Mon Sep 17 00:00:00 2001 From: kktheballer Date: Mon, 4 Apr 2022 22:51:59 -0700 Subject: [PATCH 09/11] FGNHG CLI changes (#1588) 1. Improve CLI output format 2. Adapt CLI to match-mode based next-hop alias mapping Co-authored-by: Kavin Kamaraj --- show/fgnhg.py | 191 +++++++++++++++++------------- tests/fgnhg_test.py | 194 +++++-------------------------- tests/mock_tables/config_db.json | 28 +++++ tests/mock_tables/state_db.json | 64 +++------- 4 files changed, 180 insertions(+), 297 deletions(-) diff --git a/show/fgnhg.py b/show/fgnhg.py index a8b12787ce..320feafe25 100644 --- a/show/fgnhg.py +++ b/show/fgnhg.py @@ -1,6 +1,4 @@ -import ipaddress from collections import OrderedDict - import click import utilities_common.cli as clicommon from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector @@ -12,32 +10,28 @@ def fgnhg(): """Show FGNHG information""" pass - @fgnhg.command() @click.argument('nhg', required=False) def active_hops(nhg): config_db = ConfigDBConnector() config_db.connect() - fg_nhg_prefix_table = {} - fg_nhg_alias = {} - fg_nhg_prefix_table = config_db.get_table('FG_NHG_PREFIX') - - for key, value in fg_nhg_prefix_table.items(): - fg_nhg_alias[key] = value['FG_NHG'] - state_db = SonicV2Connector(host='127.0.0.1') state_db.connect(state_db.STATE_DB, False) # Make one attempt only STATE_DB - TABLE_NAME_SEPARATOR = '|' prefix = 'FG_ROUTE_TABLE' + TABLE_NAME_SEPARATOR _hash = '{}{}'.format(prefix, '*') table_keys = [] - table_keys = state_db.keys(state_db.STATE_DB, _hash) t_dict = {} - header = ["FG_NHG_PREFIX", "Active Next Hops"] + header = ["FG NHG Prefix", "Active Next Hops"] table = [] output_dict = {} - + ctx = click.get_current_context() + try: + table_keys = sorted(state_db.keys(state_db.STATE_DB, _hash)) + except Exception as e: + ctx.fail("FG_ROUTE_TABLE does not exist!") + if table_keys is None: + ctx.fail("FG_ROUTE_TABLE does not exist!") if nhg is None: for nhg_prefix in table_keys: t_dict = state_db.get_all(state_db.STATE_DB, nhg_prefix) @@ -47,123 +41,154 @@ def active_hops(nhg): output_dict[nhg_prefix].append(nh_ip.split("@")[0]) else: output_dict[nhg_prefix] = [nh_ip.split("@")[0]] - nhg_prefix_report = (nhg_prefix.split("|")[1]) formatted_nhps = ','.replace(',', '\n').join(output_dict[nhg_prefix]) table.append([nhg_prefix_report, formatted_nhps]) - click.echo(tabulate(table, header, tablefmt="grid")) - + click.echo(tabulate(table, header, tablefmt="simple")) else: - for nhg_prefix, alias in fg_nhg_alias.items(): - if nhg == alias: - if ":" in nhg_prefix: - for key in table_keys: - mod_key = 
key.split("|")[1].split("/")[0] - mod_nhg_prefix = nhg_prefix.split("/")[0] - if ipaddress.ip_address(mod_key).exploded == ipaddress.ip_address(mod_nhg_prefix).exploded: - t_dict = state_db.get_all(state_db.STATE_DB, key) - nhg_prefix = "FG_ROUTE_TABLE|" + nhg_prefix - else: - nhg_prefix = "FG_ROUTE_TABLE|" + nhg_prefix - t_dict = state_db.get_all(state_db.STATE_DB, nhg_prefix) - + nhip_prefix_map = {} + header = ["FG NHG Prefix", "Active Next Hops"] + try: + fg_nhg_member_table = config_db.get_table('FG_NHG_MEMBER') + except Exception as e: + ctx.fail("FG_NHG_MEMBER entries not present in config_db") + alias_list = [] + nexthop_alias = {} + output_list = [] + for nexthop, nexthop_metadata in fg_nhg_member_table.items(): + alias_list.append(nexthop_metadata['FG_NHG']) + nexthop_alias[nexthop] = nexthop_metadata['FG_NHG'] + if nhg not in alias_list: + ctx.fail("Please provide a valid NHG alias") + else: + for nhg_prefix in table_keys: + t_dict = state_db.get_all(state_db.STATE_DB, nhg_prefix) vals = sorted(set([val for val in t_dict.values()])) - for nh_ip in vals: - if nhg_prefix in output_dict: - output_dict[nhg_prefix].append(nh_ip.split("@")[0]) + nhip_prefix_map[nh_ip.split("@")[0]] = nhg_prefix + + if nh_ip.split("@")[0] in nexthop_alias: + if nexthop_alias[nh_ip.split("@")[0]] == nhg: + output_list.append(nh_ip.split("@")[0]) else: - output_dict[nhg_prefix] = [nh_ip.split("@")[0]] - - nhg_prefix_report = (nhg_prefix.split("|")[1]) - formatted_nhps = ','.replace(',', '\n').join(output_dict[nhg_prefix]) - table.append([nhg_prefix_report, formatted_nhps]) - click.echo(tabulate(table, header, tablefmt="grid")) - + ctx.fail("state_db and config_db have FGNHG prefix config mismatch. Check device config!"); + output_list = sorted(output_list) + if not output_list: + ctx.fail("FG_ROUTE table likely does not contain the required entries") + nhg_prefix_report = nhip_prefix_map[output_list[0]].split("|")[1] + formatted_output_list = ','.replace(',', '\n').join(output_list) + table.append([nhg_prefix_report, formatted_output_list]) + click.echo(tabulate(table, header, tablefmt="simple")) @fgnhg.command() @click.argument('nhg', required=False) def hash_view(nhg): config_db = ConfigDBConnector() config_db.connect() - fg_nhg_prefix_table = {} - fg_nhg_alias = {} - fg_nhg_prefix_table = config_db.get_table('FG_NHG_PREFIX') - - for key, value in fg_nhg_prefix_table.items(): - fg_nhg_alias[key] = value['FG_NHG'] - state_db = SonicV2Connector(host='127.0.0.1') state_db.connect(state_db.STATE_DB, False) # Make one attempt only STATE_DB - TABLE_NAME_SEPARATOR = '|' prefix = 'FG_ROUTE_TABLE' + TABLE_NAME_SEPARATOR _hash = '{}{}'.format(prefix, '*') table_keys = [] - table_keys = state_db.keys(state_db.STATE_DB, _hash) t_dict = {} - header = ["FG_NHG_PREFIX", "Next Hop", "Hash buckets"] + header = ["FG NHG Prefix", "Next Hop", "Hash buckets"] table = [] output_dict = {} bank_dict = {} - + ctx = click.get_current_context() + try: + table_keys = sorted(state_db.keys(state_db.STATE_DB, _hash)) + except Exception as e: + ctx.fail("FG_ROUTE_TABLE does not exist!") + if table_keys is None: + ctx.fail("FG_ROUTE_TABLE does not exist!") if nhg is None: for nhg_prefix in table_keys: bank_dict = {} t_dict = state_db.get_all(state_db.STATE_DB, nhg_prefix) vals = sorted(set([val for val in t_dict.values()])) - for nh_ip in vals: bank_ids = sorted([int(k) for k, v in t_dict.items() if v == nh_ip]) - bank_ids = [str(x) for x in bank_ids] - if nhg_prefix in output_dict: output_dict[nhg_prefix].append(nh_ip.split("@")[0]) 
else: output_dict[nhg_prefix] = [nh_ip.split("@")[0]] bank_dict[nh_ip.split("@")[0]] = bank_ids - bank_dict = OrderedDict(sorted(bank_dict.items())) nhg_prefix_report = (nhg_prefix.split("|")[1]) - for nhip, val in bank_dict.items(): - formatted_banks = ','.replace(',', '\n').join(bank_dict[nhip]) - table.append([nhg_prefix_report, nhip, formatted_banks]) - - click.echo(tabulate(table, header, tablefmt="grid")) - + displayed_banks = [] + bank_output = "" + formatted_banks = (bank_dict[nhip]) + for bankid in formatted_banks: + if (len(str(bankid)) == 1): + displayed_banks.append(str(bankid) + " ") + + if (len(str(bankid)) == 2): + displayed_banks.append(str(bankid) + " ") + + if (len(str(bankid)) == 3): + displayed_banks.append(str(bankid)) + for i in range (0, len(displayed_banks), 8): + bank_output = bank_output + " ".join(displayed_banks[i:i+8]) + "\n" + bank_output = bank_output + "\n" + table.append([nhg_prefix_report, nhip, bank_output]) + click.echo(tabulate(table, header, tablefmt="simple")) else: - for nhg_prefix, alias in fg_nhg_alias.items(): - if nhg == alias: - if ":" in nhg_prefix: - for key in table_keys: - mod_key = key.split("|")[1].split("/")[0] - mod_nhg_prefix = nhg_prefix.split("/")[0] - if ipaddress.ip_address(mod_key).exploded == ipaddress.ip_address(mod_nhg_prefix).exploded: - t_dict = state_db.get_all(state_db.STATE_DB, key) - nhg_prefix = "FG_ROUTE_TABLE|" + nhg_prefix - else: - nhg_prefix = "FG_ROUTE_TABLE|" + nhg_prefix - t_dict = state_db.get_all(state_db.STATE_DB, nhg_prefix) - + header = ["FG NHG Prefix", "Next Hop", "Hash buckets"] + try: + fg_nhg_member_table = config_db.get_table('FG_NHG_MEMBER') + except Exception as e: + ctx.fail("FG_NHG_MEMBER entries not present in config_db") + alias_list = [] + nexthop_alias = {} + for nexthop, nexthop_metadata in fg_nhg_member_table.items(): + alias_list.append(nexthop_metadata['FG_NHG']) + nexthop_alias[nexthop] = nexthop_metadata['FG_NHG'] + if nhg not in alias_list: + ctx.fail("Please provide a valid NHG alias") + else: + nhip_prefix_map = {} + for nhg_prefix in table_keys: + bank_dict = {} + t_dict = state_db.get_all(state_db.STATE_DB, nhg_prefix) vals = sorted(set([val for val in t_dict.values()])) - for nh_ip in vals: bank_ids = sorted([int(k) for k, v in t_dict.items() if v == nh_ip]) + nhip_prefix_map[nh_ip.split("@")[0]] = nhg_prefix bank_ids = [str(x) for x in bank_ids] if nhg_prefix in output_dict: output_dict[nhg_prefix].append(nh_ip.split("@")[0]) else: output_dict[nhg_prefix] = [nh_ip.split("@")[0]] bank_dict[nh_ip.split("@")[0]] = bank_ids - - nhg_prefix_report = (nhg_prefix.split("|")[1]) bank_dict = OrderedDict(sorted(bank_dict.items())) - - for nhip, val in bank_dict.items(): - formatted_banks = ','.replace(',', '\n').join(bank_dict[nhip]) - table.append([nhg_prefix_report, nhip, formatted_banks]) - - click.echo(tabulate(table, header, tablefmt="grid")) + output_bank_dict = {} + for nexthop, banks in bank_dict.items(): + if nexthop in nexthop_alias: + if nexthop_alias[nexthop] == nhg: + output_bank_dict[nexthop] = banks + else: + ctx.fail("state_db and config_db have FGNHG prefix config mismatch. 
Check device config!"); + nhg_prefix_report = nhip_prefix_map[list(bank_dict.keys())[0]].split("|")[1] + output_bank_dict = OrderedDict(sorted(output_bank_dict.items())) + for nhip, val in output_bank_dict.items(): + bank_output = "" + displayed_banks = [] + formatted_banks = (bank_dict[nhip]) + for bankid in formatted_banks: + if (len(str(bankid)) == 1): + displayed_banks.append(str(bankid) + " ") + + if (len(str(bankid)) == 2): + displayed_banks.append(str(bankid) + " ") + + if (len(str(bankid)) == 3): + displayed_banks.append(str(bankid)) + for i in range (0, len(displayed_banks), 8): + bank_output = bank_output + " ".join(displayed_banks[i:i+8]) + "\n" + table.append([nhg_prefix_report, nhip, bank_output]) + click.echo(tabulate(table, header, tablefmt="simple")) diff --git a/tests/fgnhg_test.py b/tests/fgnhg_test.py index bfa08509dd..843a221922 100644 --- a/tests/fgnhg_test.py +++ b/tests/fgnhg_test.py @@ -9,187 +9,49 @@ show_fgnhg_hash_view_output="""\ -+-----------------+--------------------+----------------+ -| FG_NHG_PREFIX | Next Hop | Hash buckets | -+=================+====================+================+ -| fc:5::/128 | 200:200:200:200::4 | 0 | -| | | 1 | -| | | 2 | -| | | 3 | -| | | 4 | -| | | 5 | -| | | 6 | -| | | 7 | -| | | 8 | -| | | 9 | -| | | 10 | -| | | 11 | -| | | 12 | -| | | 13 | -| | | 14 | -| | | 15 | -+-----------------+--------------------+----------------+ -| fc:5::/128 | 200:200:200:200::5 | 16 | -| | | 17 | -| | | 18 | -| | | 19 | -| | | 20 | -| | | 21 | -| | | 22 | -| | | 23 | -| | | 24 | -| | | 25 | -| | | 26 | -| | | 27 | -| | | 28 | -| | | 29 | -| | | 30 | -| | | 31 | -+-----------------+--------------------+----------------+ -| 100.50.25.12/32 | 200.200.200.4 | 0 | -| | | 1 | -| | | 2 | -| | | 3 | -| | | 4 | -| | | 5 | -| | | 6 | -| | | 7 | -| | | 8 | -| | | 9 | -| | | 10 | -| | | 11 | -| | | 12 | -| | | 13 | -| | | 14 | -| | | 15 | -+-----------------+--------------------+----------------+ -| 100.50.25.12/32 | 200.200.200.5 | 16 | -| | | 17 | -| | | 18 | -| | | 19 | -| | | 20 | -| | | 21 | -| | | 22 | -| | | 23 | -| | | 24 | -| | | 25 | -| | | 26 | -| | | 27 | -| | | 28 | -| | | 29 | -| | | 30 | -| | | 31 | -+-----------------+--------------------+----------------+ +FG NHG Prefix Next Hop Hash buckets +--------------- ------------------ ------------------------------ +100.50.25.12/32 200.200.200.4 0 1 2 3 4 5 6 7 +100.50.25.12/32 200.200.200.5 8 9 10 11 12 13 14 15 +fc:5::/128 200:200:200:200::4 0 1 2 3 4 5 6 7 +fc:5::/128 200:200:200:200::5 8 9 10 11 12 13 14 15 """ show_fgnhgv4_hash_view_output="""\ -+-----------------+---------------+----------------+ -| FG_NHG_PREFIX | Next Hop | Hash buckets | -+=================+===============+================+ -| 100.50.25.12/32 | 200.200.200.4 | 0 | -| | | 1 | -| | | 2 | -| | | 3 | -| | | 4 | -| | | 5 | -| | | 6 | -| | | 7 | -| | | 8 | -| | | 9 | -| | | 10 | -| | | 11 | -| | | 12 | -| | | 13 | -| | | 14 | -| | | 15 | -+-----------------+---------------+----------------+ -| 100.50.25.12/32 | 200.200.200.5 | 16 | -| | | 17 | -| | | 18 | -| | | 19 | -| | | 20 | -| | | 21 | -| | | 22 | -| | | 23 | -| | | 24 | -| | | 25 | -| | | 26 | -| | | 27 | -| | | 28 | -| | | 29 | -| | | 30 | -| | | 31 | -+-----------------+---------------+----------------+ +FG NHG Prefix Next Hop Hash buckets +--------------- ------------- ------------------------------ +100.50.25.12/32 200.200.200.4 0 1 2 3 4 5 6 7 +100.50.25.12/32 200.200.200.5 8 9 10 11 12 13 14 15 """ show_fgnhgv6_hash_view_output="""\ 
-+-----------------+--------------------+----------------+ -| FG_NHG_PREFIX | Next Hop | Hash buckets | -+=================+====================+================+ -| fc:05::/128 | 200:200:200:200::4 | 0 | -| | | 1 | -| | | 2 | -| | | 3 | -| | | 4 | -| | | 5 | -| | | 6 | -| | | 7 | -| | | 8 | -| | | 9 | -| | | 10 | -| | | 11 | -| | | 12 | -| | | 13 | -| | | 14 | -| | | 15 | -+-----------------+--------------------+----------------+ -| fc:05::/128 | 200:200:200:200::5 | 16 | -| | | 17 | -| | | 18 | -| | | 19 | -| | | 20 | -| | | 21 | -| | | 22 | -| | | 23 | -| | | 24 | -| | | 25 | -| | | 26 | -| | | 27 | -| | | 28 | -| | | 29 | -| | | 30 | -| | | 31 | -+-----------------+--------------------+----------------+ +FG NHG Prefix Next Hop Hash buckets +--------------- ------------------ ------------------------------ +fc:5::/128 200:200:200:200::4 0 1 2 3 4 5 6 7 +fc:5::/128 200:200:200:200::5 8 9 10 11 12 13 14 15 """ show_fgnhg_active_hops_output="""\ -+-----------------+--------------------+ -| FG_NHG_PREFIX | Active Next Hops | -+=================+====================+ -| fc:5::/128 | 200:200:200:200::4 | -| | 200:200:200:200::5 | -+-----------------+--------------------+ -| 100.50.25.12/32 | 200.200.200.4 | -| | 200.200.200.5 | -+-----------------+--------------------+ +FG NHG Prefix Active Next Hops +--------------- ------------------ +100.50.25.12/32 200.200.200.4 + 200.200.200.5 +fc:5::/128 200:200:200:200::4 + 200:200:200:200::5 """ show_fgnhgv4_active_hops_output="""\ -+-----------------+--------------------+ -| FG_NHG_PREFIX | Active Next Hops | -+=================+====================+ -| 100.50.25.12/32 | 200.200.200.4 | -| | 200.200.200.5 | -+-----------------+--------------------+ +FG NHG Prefix Active Next Hops +--------------- ------------------ +100.50.25.12/32 200.200.200.4 + 200.200.200.5 """ show_fgnhgv6_active_hops_output="""\ -+-----------------+--------------------+ -| FG_NHG_PREFIX | Active Next Hops | -+=================+====================+ -| fc:05::/128 | 200:200:200:200::4 | -| | 200:200:200:200::5 | -+-----------------+--------------------+ +FG NHG Prefix Active Next Hops +--------------- ------------------ +fc:5::/128 200:200:200:200::4 + 200:200:200:200::5 """ diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index 9b618df3b3..2f4002cb9f 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -1111,6 +1111,34 @@ "FG_NHG_PREFIX|fc:05::/128": { "FG_NHG": "fgnhg_v6" }, + "FG_NHG|fgnhg_v4": { + "bucket_size": "120", + "match_mode": "nexthop-based" + }, + "FG_NHG|fgnhg_v6": { + "bucket_size": "120", + "match_mode": "nexthop-based" + }, + "FG_NHG_MEMBER|200.200.200.4": { + "FG_NHG": "fgnhg_v4", + "bank": "0", + "link": "Ethernet8" + }, + "FG_NHG_MEMBER|200.200.200.5": { + "FG_NHG": "fgnhg_v4", + "bank": "1", + "link": "Ethernet12" + }, + "FG_NHG_MEMBER|200:200:200:200::4": { + "FG_NHG": "fgnhg_v6", + "bank": "0", + "link": "Ethernet8" + }, + "FG_NHG_MEMBER|200:200:200:200::5": { + "FG_NHG": "fgnhg_v6", + "bank": "1", + "link": "Ethernet12" + }, "BGP_NEIGHBOR|10.0.0.1": { "asn": "65200", "holdtime": "10", diff --git a/tests/mock_tables/state_db.json b/tests/mock_tables/state_db.json index 1cd881ac71..958b7a8e03 100644 --- a/tests/mock_tables/state_db.json +++ b/tests/mock_tables/state_db.json @@ -438,30 +438,14 @@ "5": "200:200:200:200::4@Vlan1000", "6": "200:200:200:200::4@Vlan1000", "7": "200:200:200:200::4@Vlan1000", - "8": "200:200:200:200::4@Vlan1000", - "9": "200:200:200:200::4@Vlan1000", - "10": 
"200:200:200:200::4@Vlan1000", - "11": "200:200:200:200::4@Vlan1000", - "12": "200:200:200:200::4@Vlan1000", - "13": "200:200:200:200::4@Vlan1000", - "14": "200:200:200:200::4@Vlan1000", - "15": "200:200:200:200::4@Vlan1000", - "16": "200:200:200:200::5@Vlan1000", - "17": "200:200:200:200::5@Vlan1000", - "18": "200:200:200:200::5@Vlan1000", - "19": "200:200:200:200::5@Vlan1000", - "20": "200:200:200:200::5@Vlan1000", - "21": "200:200:200:200::5@Vlan1000", - "22": "200:200:200:200::5@Vlan1000", - "23": "200:200:200:200::5@Vlan1000", - "24": "200:200:200:200::5@Vlan1000", - "25": "200:200:200:200::5@Vlan1000", - "26": "200:200:200:200::5@Vlan1000", - "27": "200:200:200:200::5@Vlan1000", - "28": "200:200:200:200::5@Vlan1000", - "29": "200:200:200:200::5@Vlan1000", - "30": "200:200:200:200::5@Vlan1000", - "31": "200:200:200:200::5@Vlan1000" + "8": "200:200:200:200::5@Vlan1000", + "9": "200:200:200:200::5@Vlan1000", + "10": "200:200:200:200::5@Vlan1000", + "11": "200:200:200:200::5@Vlan1000", + "12": "200:200:200:200::5@Vlan1000", + "13": "200:200:200:200::5@Vlan1000", + "14": "200:200:200:200::5@Vlan1000", + "15": "200:200:200:200::5@Vlan1000" }, "FG_ROUTE_TABLE|100.50.25.12/32": { "0": "200.200.200.4@Vlan1000", @@ -472,30 +456,14 @@ "5": "200.200.200.4@Vlan1000", "6": "200.200.200.4@Vlan1000", "7": "200.200.200.4@Vlan1000", - "8": "200.200.200.4@Vlan1000", - "9": "200.200.200.4@Vlan1000", - "10": "200.200.200.4@Vlan1000", - "11": "200.200.200.4@Vlan1000", - "12": "200.200.200.4@Vlan1000", - "13": "200.200.200.4@Vlan1000", - "14": "200.200.200.4@Vlan1000", - "15": "200.200.200.4@Vlan1000", - "16": "200.200.200.5@Vlan1000", - "17": "200.200.200.5@Vlan1000", - "18": "200.200.200.5@Vlan1000", - "19": "200.200.200.5@Vlan1000", - "20": "200.200.200.5@Vlan1000", - "21": "200.200.200.5@Vlan1000", - "22": "200.200.200.5@Vlan1000", - "23": "200.200.200.5@Vlan1000", - "24": "200.200.200.5@Vlan1000", - "25": "200.200.200.5@Vlan1000", - "26": "200.200.200.5@Vlan1000", - "27": "200.200.200.5@Vlan1000", - "28": "200.200.200.5@Vlan1000", - "29": "200.200.200.5@Vlan1000", - "30": "200.200.200.5@Vlan1000", - "31": "200.200.200.5@Vlan1000" + "8": "200.200.200.5@Vlan1000", + "9": "200.200.200.5@Vlan1000", + "10": "200.200.200.5@Vlan1000", + "11": "200.200.200.5@Vlan1000", + "12": "200.200.200.5@Vlan1000", + "13": "200.200.200.5@Vlan1000", + "14": "200.200.200.5@Vlan1000", + "15": "200.200.200.5@Vlan1000" }, "REBOOT_CAUSE|2020_10_09_04_53_58": { "cause": "warm-reboot", From 9e2fbf40ed27fe89cd82591d42d71ac86f594877 Mon Sep 17 00:00:00 2001 From: bingwang-ms <66248323+bingwang-ms@users.noreply.github.com> Date: Wed, 6 Apr 2022 01:09:14 +0800 Subject: [PATCH 10/11] Update db_migrator to support `pfcwd_sw_enable` (#2087) Signed-off-by: bingwang bingwang@microsoft.com What I did This PR is to update db_migrator to support pfcwd_sw_enable. Currently, table pfc_enable is to specify on which queue to enable PFC and pfc watchdog. As we are going to add two extra lossless queues on which on watchdog is enable(HLD Azure/SONiC#950), a new field is required to specify on which queue to enable PFC watchdog. That is pfcwd_sw_enable. Field Description pfc_enable Specify on which queues to enable PFC pfcwd_sw_enable Specify on which queues to enable software PFC watchdog HLD DSCP remapping Change in orchagent Update orchagent to support new field pfcwd_sw_enable How I did it Update db_migrator. 
How to verify it Verified by UT Verified by copying the updated db_migrator.py to a SONiC box and run db_migrator.py -o migrate --- scripts/db_migrator.py | 21 +++++++++-- .../config_db/qos_map_table_expected.json | 36 +++++++++++++++++++ .../config_db/qos_map_table_input.json | 34 ++++++++++++++++++ tests/db_migrator_test.py | 27 +++++++++++++- 4 files changed, 115 insertions(+), 3 deletions(-) create mode 100644 tests/db_migrator_input/config_db/qos_map_table_expected.json create mode 100644 tests/db_migrator_input/config_db/qos_map_table_input.json diff --git a/scripts/db_migrator.py b/scripts/db_migrator.py index ecb40d880f..e41bef1334 100755 --- a/scripts/db_migrator.py +++ b/scripts/db_migrator.py @@ -44,7 +44,7 @@ def __init__(self, namespace, socket=None): none-zero values. build: sequentially increase within a minor version domain. """ - self.CURRENT_VERSION = 'version_2_0_4' + self.CURRENT_VERSION = 'version_2_0_5' self.TABLE_NAME = 'VERSIONS' self.TABLE_KEY = 'DATABASE' @@ -665,9 +665,26 @@ def version_2_0_3(self): def version_2_0_4(self): """ - Current latest version. Nothing to do here. + Version 2_0_4 """ log.log_info('Handling version_2_0_4') + # Migrate "pfc_enable" to "pfc_enable" and "pfcwd_sw_enable" + # 1. pfc_enable means enable pfc on certain queues + # 2. pfcwd_sw_enable means enable PFC software watchdog on certain queues + # By default, PFC software watchdog is enabled on all pfc enabled queues. + qos_maps = self.configDB.get_table('PORT_QOS_MAP') + for k, v in qos_maps.items(): + if 'pfc_enable' in v: + v['pfcwd_sw_enable'] = v['pfc_enable'] + self.configDB.set_entry('PORT_QOS_MAP', k, v) + + return 'version_2_0_5' + + def version_2_0_5(self): + """ + Current latest version. Nothing to do here. + """ + log.log_info('Handling version_2_0_5') return None def get_version(self): diff --git a/tests/db_migrator_input/config_db/qos_map_table_expected.json b/tests/db_migrator_input/config_db/qos_map_table_expected.json new file mode 100644 index 0000000000..946f7f02d9 --- /dev/null +++ b/tests/db_migrator_input/config_db/qos_map_table_expected.json @@ -0,0 +1,36 @@ +{ + "VERSIONS|DATABASE": { + "VERSION": "version_2_0_5" + }, + "PORT_QOS_MAP": { + "Ethernet0": { + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]", + "pfc_enable": "3,4", + "pfcwd_sw_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]" + }, + "Ethernet100": { + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]", + "pfc_enable": "3,4", + "pfcwd_sw_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]" + }, + "Ethernet92": { + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]" + }, + "Ethernet96": { + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]" + } + } +} + diff --git a/tests/db_migrator_input/config_db/qos_map_table_input.json b/tests/db_migrator_input/config_db/qos_map_table_input.json new file mode 100644 index 0000000000..c7dbce9658 --- /dev/null +++ b/tests/db_migrator_input/config_db/qos_map_table_input.json @@ -0,0 +1,34 @@ +{ + "VERSIONS|DATABASE": { + "VERSION": 
"version_2_0_4" + }, + "PORT_QOS_MAP": { + "Ethernet0": { + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]" + }, + "Ethernet100": { + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]" + }, + "Ethernet92": { + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]" + }, + "Ethernet96": { + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]" + } + } +} + diff --git a/tests/db_migrator_test.py b/tests/db_migrator_test.py index 0cda3d37ac..96e139b934 100644 --- a/tests/db_migrator_test.py +++ b/tests/db_migrator_test.py @@ -281,7 +281,7 @@ def setup_class(cls): def teardown_class(cls): os.environ['UTILITIES_UNIT_TESTING'] = "0" dbconnector.dedicated_dbs['CONFIG_DB'] = None - + def test_lacp_key_migrator(self): dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'portchannel-input') import db_migrator @@ -348,3 +348,28 @@ def test_qos_buffer_migrator_for_cold_reboot(self): self.check_config_db(dbmgtr.configDB, expected_db.cfgdb) self.check_appl_db(dbmgtr.appDB, expected_appl_db) self.clear_dedicated_mock_dbs() + + +class TestPfcEnableMigrator(object): + @classmethod + def setup_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "2" + + @classmethod + def teardown_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "0" + dbconnector.dedicated_dbs['CONFIG_DB'] = None + + def test_pfc_enable_migrator(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'qos_map_table_input') + import db_migrator + dbmgtr = db_migrator.DBMigrator(None) + dbmgtr.migrate() + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'qos_map_table_expected') + expected_db = Db() + + resulting_table = dbmgtr.configDB.get_table('PORT_QOS_MAP') + expected_table = expected_db.cfgdb.get_table('PORT_QOS_MAP') + + diff = DeepDiff(resulting_table, expected_table, ignore_order=True) + assert not diff From 52ca3245e8fe0208bf02f4c51c916ccca030fc74 Mon Sep 17 00:00:00 2001 From: Praveen Chaudhary Date: Thu, 7 Apr 2022 23:41:38 -0700 Subject: [PATCH 11/11] [config/config_mgmt.py]: Fix dpb issue with upper case mac in (#2066) device_metadata. libYang converts ietf yang types to lower case internally,which creates false config diff for us while DPB. This PR fixes the issue by not precessing false diff. Related issue" https://github.com/Azure/sonic-buildimage/issues/9478 #### What I did fixes issue: https://github.com/Azure/sonic-buildimage/issues/9478 #### How I did it libYang converts ietf yang types to lower case internally,which creates false config diff for us while DPB. Example: For DEVICE_METADATA['localhost']['mac'] type is yang:mac-address. 
Libyang converts from 'XX:XX:XX:E4:B3:DD' -> 'xx:xx:xx:e4:b3:dd' so args for function _recurCreateConfig in this case will be: diff = DEVICE_METADATA['localhost']['mac'] where DEVICE_METADATA': {'localhost': {'mac': ['XX:XX:XX:E4:B3:DD', 'xx:xx:xx:e4:b3:dd']}}} Note: above dict is representation of diff in config given by diffJson library. out = 'XX:XX:XX:e4:b3:dd' inp = 'xx:xx:xx:E4:B3:DD' I add a check to avoid processing of such config diff for DPB. #### How to verify it Added a unit test. Build time. --- config/config_mgmt.py | 21 ++++++++++++ tests/config_mgmt_test.py | 72 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 93 insertions(+) diff --git a/config/config_mgmt.py b/config/config_mgmt.py index bbed677fa6..3b975e7d9b 100644 --- a/config/config_mgmt.py +++ b/config/config_mgmt.py @@ -872,6 +872,27 @@ def _recurCreateConfig(diff, inp, outp, config): # we do not allow updates right now if isinstance(diff, list) and isinstance(outp, dict): return changed + ''' + libYang converts ietf yang types to lower case internally, which + creates false config diff for us while DPB. + + Example: + For DEVICE_METADATA['localhost']['mac'] type is yang:mac-address. + Libyang converts from 'XX:XX:XX:E4:B3:DD' -> 'xx:xx:xx:e4:b3:dd' + so args for this functions will be: + + diff = DEVICE_METADATA['localhost']['mac'] + where DEVICE_METADATA': {'localhost': {'mac': ['XX:XX:XX:E4:B3:DD', 'xx:xx:xx:e4:b3:dd']}}} + Note: above dict is representation of diff in config given by diffJson + library. + out = 'XX:XX:XX:e4:b3:dd' + inp = 'xx:xx:xx:E4:B3:DD' + + With below check, we will avoid processing of such config diff for DPB. + ''' + if isinstance(diff, list) and isinstance(outp, str) and \ + inp.lower() == outp.lower(): + return changed idx = -1 for key in diff: diff --git a/tests/config_mgmt_test.py b/tests/config_mgmt_test.py index 41b7f5883d..c5509366a8 100644 --- a/tests/config_mgmt_test.py +++ b/tests/config_mgmt_test.py @@ -57,6 +57,78 @@ def test_search_keys(self): len(out['ACL_TABLE'][k]) == 1 return + def test_upper_case_mac_fix(self): + ''' + Issue: + https://github.com/Azure/sonic-buildimage/issues/9478 + + LibYang converts ietf yang types to lower case internally,which + creates false config diff for us while DPB. + This tests is to verify the fix done in config_mgmt.py to resolve this + issue. + + Example: + For DEVICE_METADATA['localhost']['mac'] type is yang:mac-address. + Libyang converts from 'XX:XX:XX:E4:B3:DD' -> 'xx:xx:xx:e4:b3:dd' + ''' + curConfig = deepcopy(configDbJson) + # Keep only PORT part to skip dependencies. 
+ curConfig = {'PORT': curConfig['PORT']} + # add DEVICE_METADATA Config + curConfig['DEVICE_METADATA'] = { + "localhost": { + "bgp_asn": "65100", + "default_bgp_status": "up", + "default_pfcwd_status": "disable", + "docker_routing_config_mode": "split", + "hostname": "sonic", + "hwsku": "Seastone-DX010", + "mac": "00:11:22:BB:CC:DD", + "platform": "x86_64-cel_seastone-r0", + "type": "LeafRouter" + } + } + cmdpb = self.config_mgmt_dpb(curConfig) + # create ARGS + dPorts, pJson = self.generate_args(portIdx=0, laneIdx=65, \ + curMode='4x25G', newMode='2x50G') + + # use side effect to mock _createConfigToLoad but with call to same + # function + cmdpb._createConfigToLoad = mock.MagicMock(side_effect=cmdpb._createConfigToLoad) + + # Try to breakout and see if writeConfigDB is called thrice + deps, ret = cmdpb.breakOutPort(delPorts=dPorts, portJson=pJson, \ + force=True, loadDefConfig=False) + + ''' + assert call to _createConfigToLoad has + DEVICE_METADATA': {'localhost': {'mac': ['XX:XX:XX:E4:B3:DD', + 'xx:xx:xx:e4:b3:dd']}} in diff + ''' + (args, kwargs) = cmdpb._createConfigToLoad.call_args_list[0] + print(args) + # in case of tuple get first arg, which is diff + if type(args) == tuple: + args = args[0] + assert args['DEVICE_METADATA']['localhost']['mac'] == \ + ['00:11:22:BB:CC:DD', '00:11:22:bb:cc:dd'] + + # verify correct function call to writeConfigDB + assert cmdpb.writeConfigDB.call_count == 3 + print(cmdpb.writeConfigDB.call_args_list[0]) + # args is populated if call is done as writeConfigDB(a, b), kwargs is + # populated if call is done as writeConfigDB(A=a, B=b) + (args, kwargs) = cmdpb.writeConfigDB.call_args_list[0] + print(args) + # in case of tuple also, we should have only one element writeConfigDB + if type(args) == tuple: + args = args[0] + # Make sure no DEVICE_METADATA in the args to func + assert "DEVICE_METADATA" not in args + + return + @pytest.mark.skip(reason="not stable") def test_break_out(self): # prepare default config