diff --git a/.gitignore b/.gitignore index 001db00e4b..4e61e19163 100644 --- a/.gitignore +++ b/.gitignore @@ -83,6 +83,8 @@ tests/mock_tests/tests_portsyncd # Test Files # ############## +*gcda +*gcno tests/log tests/mock_tests/test-suite.log tests/mock_tests/tests.log @@ -92,5 +94,3 @@ tests/tests.log tests/tests.trs tests/mock_tests/**/*log tests/mock_tests/**/*trs -orchagent/p4orch/tests/**/*gcda -orchagent/p4orch/tests/**/*gcno diff --git a/cfgmgr/teammgr.cpp b/cfgmgr/teammgr.cpp index d72b522047..f6c6394cdb 100644 --- a/cfgmgr/teammgr.cpp +++ b/cfgmgr/teammgr.cpp @@ -16,6 +16,8 @@ #include #include #include +#include <fstream> +#include <csignal> #include @@ -171,18 +173,29 @@ void TeamMgr::cleanTeamProcesses() SWSS_LOG_ENTER(); SWSS_LOG_NOTICE("Cleaning up LAGs during shutdown..."); - std::unordered_map aliasPidMap; + std::unordered_map<std::string, pid_t> aliasPidMap; for (const auto& alias: m_lagList) { - std::string res; pid_t pid; + // Sleep for 10 milliseconds so as to not overwhelm the netlink + // socket buffers with events about interfaces going down + std::this_thread::sleep_for(std::chrono::milliseconds(10)); try { - std::stringstream cmd; - cmd << "cat " << shellquote("/var/run/teamd/" + alias + ".pid"); - EXEC_WITH_ERROR_THROW(cmd.str(), res); + ifstream pidFile("/var/run/teamd/" + alias + ".pid"); + if (pidFile.is_open()) + { + pidFile >> pid; + aliasPidMap[alias] = pid; + SWSS_LOG_INFO("Read port channel %s pid %d", alias.c_str(), pid); + } + else + { + SWSS_LOG_NOTICE("Unable to read pid file for %s, skipping...", alias.c_str()); + continue; + } } catch (const std::exception &e) { @@ -191,32 +204,15 @@ void TeamMgr::cleanTeamProcesses() continue; } - try - { - pid = static_cast<pid_t>(std::stoul(res, nullptr, 10)); - aliasPidMap[alias] = pid; - - SWSS_LOG_INFO("Read port channel %s pid %d", alias.c_str(), pid); - } - catch (const std::exception &e) + if (kill(pid, SIGTERM)) { - SWSS_LOG_ERROR("Failed to read port channel %s pid: %s", alias.c_str(), e.what()); - continue; + SWSS_LOG_ERROR("Failed to send SIGTERM to port channel %s pid %d: %s", alias.c_str(), pid, strerror(errno)); + aliasPidMap.erase(alias); } - - try + else { - std::stringstream cmd; - cmd << "kill -TERM " << pid; - EXEC_WITH_ERROR_THROW(cmd.str(), res); + SWSS_LOG_NOTICE("Sent SIGTERM to port channel %s pid %d", alias.c_str(), pid); } - catch (const std::exception &e) - { - SWSS_LOG_ERROR("Failed to send SIGTERM to port channel %s pid %d: %s", alias.c_str(), pid, e.what()); - aliasPidMap.erase(alias); - } } for (const auto& cit: aliasPidMap) { @@ -224,13 +220,12 @@ const auto &alias = cit.first; const auto &pid = cit.second; - std::stringstream cmd; - std::string res; - SWSS_LOG_NOTICE("Waiting for port channel %s pid %d to stop...", alias.c_str(), pid); - cmd << "tail -f --pid=" << pid << " /dev/null"; - EXEC_WITH_ERROR_THROW(cmd.str(), res); + while (!kill(pid, 0)) + { + std::this_thread::sleep_for(std::chrono::milliseconds(10)); + } } SWSS_LOG_NOTICE("LAGs cleanup is done"); @@ -658,42 +653,25 @@ bool TeamMgr::removeLag(const string &alias) { SWSS_LOG_ENTER(); - stringstream cmd; - string res; pid_t pid; - try - { - std::stringstream cmd; - cmd << "cat " << shellquote("/var/run/teamd/" + alias + ".pid"); - EXEC_WITH_ERROR_THROW(cmd.str(), res); - } - catch (const std::exception &e) { - SWSS_LOG_NOTICE("Failed to remove non-existent port channel %s pid...", alias.c_str()); - return false; - } - - try - { - pid = static_cast<pid_t>(std::stoul(res, nullptr, 10)); - SWSS_LOG_INFO("Read port channel %s pid %d", alias.c_str(), pid); - } - catch (const std::exception &e) - { - SWSS_LOG_ERROR("Failed to read port channel %s pid: %s", alias.c_str(), e.what()); - return false; + ifstream pidfile("/var/run/teamd/" + alias + ".pid"); + if (pidfile.is_open()) + { + pidfile >> pid; + SWSS_LOG_INFO("Read port channel %s pid %d", alias.c_str(), pid); + } + else + { + SWSS_LOG_NOTICE("Failed to remove non-existent port channel %s pid...", alias.c_str()); + return false; + } } - try - { - std::stringstream cmd; - cmd << "kill -TERM " << pid; - EXEC_WITH_ERROR_THROW(cmd.str(), res); - } - catch (const std::exception &e) + if (kill(pid, SIGTERM)) { - SWSS_LOG_ERROR("Failed to send SIGTERM to port channel %s pid %d: %s", alias.c_str(), pid, e.what()); + SWSS_LOG_ERROR("Failed to send SIGTERM to port channel %s pid %d: %s", alias.c_str(), pid, strerror(errno)); return false; }
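The teammgr change above replaces the `cat`, `kill -TERM`, and `tail -f --pid` shell-outs with a direct pid-file read, `kill(pid, SIGTERM)`, and a `kill(pid, 0)` existence poll. A minimal standalone sketch of that pattern (the `readTeamdPid` helper and the hard-coded LAG name are illustrative, not part of this PR):

```cpp
// Sketch only: mirrors the pid-file + signal flow of cleanTeamProcesses().
#include <sys/types.h>
#include <csignal>      // kill()
#include <chrono>
#include <cstdio>
#include <fstream>
#include <string>
#include <thread>

// Hypothetical helper: returns -1 when the pid file is missing or unreadable.
static pid_t readTeamdPid(const std::string& lag)
{
    std::ifstream pidFile("/var/run/teamd/" + lag + ".pid");
    pid_t pid = -1;
    if (pidFile.is_open())
    {
        pidFile >> pid;
    }
    return pid;
}

int main()
{
    pid_t pid = readTeamdPid("PortChannel0001");   // illustrative LAG name
    if (pid <= 0)
    {
        std::puts("no pid file, nothing to clean up");
        return 0;
    }
    if (kill(pid, SIGTERM) != 0)                   // ask teamd to exit
    {
        std::perror("kill");
        return 1;
    }
    while (kill(pid, 0) == 0)                      // signal 0 only checks existence
    {
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
    }
    std::puts("teamd exited");
    return 0;
}
```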
diff --git a/cfgmgr/vlanmgr.cpp b/cfgmgr/vlanmgr.cpp index ffef1e4148..96ee596958 100644 --- a/cfgmgr/vlanmgr.cpp +++ b/cfgmgr/vlanmgr.cpp @@ -21,8 +21,9 @@ using namespace swss; extern MacAddress gMacAddress; -VlanMgr::VlanMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, const vector<string> &tableNames) : - Orch(cfgDb, tableNames), +VlanMgr::VlanMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, const vector<string> &tableNames, + const vector<string> &stateTableNames) : + Orch(cfgDb, stateDb, tableNames, stateTableNames), m_cfgVlanTable(cfgDb, CFG_VLAN_TABLE_NAME), m_cfgVlanMemberTable(cfgDb, CFG_VLAN_MEMBER_TABLE_NAME), m_statePortTable(stateDb, STATE_PORT_TABLE_NAME), @@ -31,6 +32,8 @@ VlanMgr::VlanMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, c m_stateVlanMemberTable(stateDb, STATE_VLAN_MEMBER_TABLE_NAME), m_appVlanTableProducer(appDb, APP_VLAN_TABLE_NAME), m_appVlanMemberTableProducer(appDb, APP_VLAN_MEMBER_TABLE_NAME), + m_appFdbTableProducer(appDb, APP_FDB_TABLE_NAME), + m_appPortTableProducer(appDb, APP_PORT_TABLE_NAME), replayDone(false) { SWSS_LOG_ENTER(); @@ -80,7 +83,11 @@ VlanMgr::VlanMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, c // /sbin/ip link del dummy 2>/dev/null; // /sbin/ip link add dummy type dummy && // /sbin/ip link set dummy master Bridge && - // /sbin/ip link set dummy up" + // /sbin/ip link set dummy up; + // /sbin/ip link set Bridge down && + // /sbin/ip link set Bridge up" + // Note: We shut down and start up the Bridge at the end to ensure that its + // link-local IPv6 address matches its MAC address. const std::string cmds = std::string("") + BASH_CMD + " -c \"" @@ -92,7 +99,9 @@ VlanMgr::VlanMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, c + IP_CMD + " link del dev dummy 2>/dev/null; " + IP_CMD + " link add dummy type dummy && " + IP_CMD + " link set dummy master " + DOT1Q_BRIDGE_NAME + " && " - + IP_CMD + " link set dummy up" + "\""; + + IP_CMD + " link set dummy up; " + + IP_CMD + " link set " + DOT1Q_BRIDGE_NAME + " down && " + + IP_CMD + " link set " + DOT1Q_BRIDGE_NAME + " up\""; std::string res; EXEC_WITH_ERROR_THROW(cmds, res);
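Context for the bridge down/up bounce added above: the kernel only re-derives an interface's IPv6 link-local address (EUI-64 per RFC 4291) from its MAC when the link comes up, so changing the MAC of an up interface leaves a stale fe80:: address behind. A sketch of the derivation the kernel performs (illustrative MAC value):

```cpp
// EUI-64 link-local derivation: flip the universal/local bit of the first MAC
// octet, splice 0xFFFE into the middle, then prefix with fe80::/64.
#include <cstdint>
#include <cstdio>

int main()
{
    const uint8_t mac[6] = {0x52, 0x54, 0x00, 0xaa, 0xbb, 0xcc};
    const uint8_t eui[8] = {
        static_cast<uint8_t>(mac[0] ^ 0x02),  // flip U/L bit
        mac[1], mac[2], 0xff, 0xfe, mac[3], mac[4], mac[5]};
    std::printf("fe80::%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
                eui[0], eui[1], eui[2], eui[3], eui[4], eui[5], eui[6], eui[7]);
    return 0;
}
```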
@@ -190,15 +199,34 @@ bool VlanMgr::setHostVlanMac(int vlan_id, const string &mac) { SWSS_LOG_ENTER(); + std::string res; + + /* + * Bring down the bridge before changing MAC addresses of the bridge and the VLAN interface. + * This is done so that the IPv6 link-local addresses of the bridge and the VLAN interface + * are updated after MAC change. + * /sbin/ip link set Bridge down + */ + string bridge_down(IP_CMD " link set " DOT1Q_BRIDGE_NAME " down"); + EXEC_WITH_ERROR_THROW(bridge_down, res); + // The command should be generated as: - // /sbin/ip link set Vlan{{vlan_id}} address {{mac}} + // /sbin/ip link set Vlan{{vlan_id}} address {{mac}} && + // /sbin/ip link set Bridge address {{mac}} ostringstream cmds; cmds << IP_CMD " link set " VLAN_PREFIX + std::to_string(vlan_id) + " address " << shellquote(mac) << " && " IP_CMD " link set " DOT1Q_BRIDGE_NAME " address " << shellquote(mac); - - std::string res; + res.clear(); EXEC_WITH_ERROR_THROW(cmds.str(), res); + /* + * Start up the bridge again. + * /sbin/ip link set Bridge up + */ + string bridge_up(IP_CMD " link set " DOT1Q_BRIDGE_NAME " up"); + res.clear(); + EXEC_WITH_ERROR_THROW(bridge_up, res); + return true; } @@ -229,10 +257,16 @@ bool VlanMgr::addHostVlanMember(int vlan_id, const string &port_alias, const str } catch (const std::runtime_error& e) { - if (!isMemberStateOk(port_alias)) + // A race condition can occur when a port channel is being removed + // but the state DB has not been updated yet, so retry instead of throwing an exception + if (!port_alias.compare(0, strlen(LAG_PREFIX), LAG_PREFIX)) + { return false; + } else + { EXEC_WITH_ERROR_THROW(cmds.str(), res); + } } return true; @@ -643,6 +677,7 @@ void VlanMgr::doVlanMemberTask(Consumer &consumer) m_stateVlanMemberTable.set(kfvKey(t), fvVector); m_vlanMemberReplay.erase(kfvKey(t)); + m_PortVlanMember[port_alias][vlan_alias] = tagging_mode; } else { @@ -661,6 +696,7 @@ void VlanMgr::doVlanMemberTask(Consumer &consumer) key += port_alias; m_appVlanMemberTableProducer.del(key); m_stateVlanMemberTable.del(kfvKey(t)); + m_PortVlanMember[port_alias].erase(vlan_alias); } else { @@ -687,6 +723,257 @@ void VlanMgr::doVlanMemberTask(Consumer &consumer) } } +void VlanMgr::doVlanPacPortTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + auto &t = it->second; + string alias = kfvKey(t); + string op = kfvOp(t); + + SWSS_LOG_DEBUG("processing %s operation %s", alias.c_str(), + op.empty() ? 
"none" : op.c_str()); + + if (op == SET_COMMAND) + { + string learn_mode; + for (auto i : kfvFieldsValues(t)) + { + if (fvField(i) == "learn_mode") + { + learn_mode = fvValue(i); + } + } + if (!learn_mode.empty()) + { + SWSS_LOG_NOTICE("set port learn mode port %s learn_mode %s\n", alias.c_str(), learn_mode.c_str()); + vector fvVector; + FieldValueTuple portLearnMode("learn_mode", learn_mode); + fvVector.push_back(portLearnMode); + m_appPortTableProducer.set(alias, fvVector); + } + } + else if (op == DEL_COMMAND) + { + if (isMemberStateOk(alias)) + { + vector fvVector; + FieldValueTuple portLearnMode("learn_mode", "hardware"); + fvVector.push_back(portLearnMode); + m_appPortTableProducer.set(alias, fvVector); + } + } + it = consumer.m_toSync.erase(it); + } +} + +void VlanMgr::doVlanPacFdbTask(Consumer &consumer) +{ + auto it = consumer.m_toSync.begin(); + + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple t = it->second; + + /* format: | */ + vector keys = tokenize(kfvKey(t), config_db_key_delimiter, 1); + /* keys[0] is vlan as (Vlan10) and keys[1] is mac as (00-00-00-00-00-00) */ + string op = kfvOp(t); + + SWSS_LOG_NOTICE("VlanMgr process static MAC vlan: %s mac: %s ", keys[0].c_str(), keys[1].c_str()); + + int vlan_id; + vlan_id = stoi(keys[0].substr(4)); + + if (!m_vlans.count(keys[0])) + { + SWSS_LOG_NOTICE("Vlan %s not available yet, mac %s", keys[0].c_str(), keys[1].c_str()); + it++; + continue; + } + + MacAddress mac = MacAddress(keys[1]); + + string key = VLAN_PREFIX + to_string(vlan_id); + key += DEFAULT_KEY_SEPARATOR; + key += mac.to_string(); + + if (op == SET_COMMAND) + { + string port, discard = "false", type = "static"; + for (auto i : kfvFieldsValues(t)) + { + if (fvField(i) == "port") + { + port = fvValue(i); + } + if (fvField(i) == "discard") + { + discard = fvValue(i); + } + if (fvField(i) == "type") + { + type = fvValue(i); + } + } + SWSS_LOG_NOTICE("PAC FDB SET %s port %s discard %s type %s\n", + key.c_str(), port.c_str(), discard.c_str(), type.c_str()); + vector fvVector; + FieldValueTuple p("port", port); + fvVector.push_back(p); + FieldValueTuple t("type", type); + fvVector.push_back(t); + FieldValueTuple d("discard", discard); + fvVector.push_back(d); + + m_appFdbTableProducer.set(key, fvVector); + } + else if (op == DEL_COMMAND) + { + m_appFdbTableProducer.del(key); + } + it = consumer.m_toSync.erase(it); + } +} + +void VlanMgr::doVlanPacVlanMemberTask(Consumer &consumer) +{ + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + auto &t = it->second; + + string key = kfvKey(t); + + key = key.substr(4); + size_t found = key.find(CONFIGDB_KEY_SEPARATOR); + int vlan_id = 0; + string vlan_alias, port_alias; + if (found != string::npos) + { + vlan_id = stoi(key.substr(0, found)); + port_alias = key.substr(found+1); + } + + vlan_alias = VLAN_PREFIX + to_string(vlan_id); + string op = kfvOp(t); + + if (op == SET_COMMAND) + { + /* Don't proceed if member port/lag is not ready yet */ + if (!isMemberStateOk(port_alias) || !isVlanStateOk(vlan_alias)) + { + SWSS_LOG_DEBUG("%s not ready, delaying", kfvKey(t).c_str()); + it++; + continue; + } + string tagging_mode = "untagged"; + auto vlans = m_PortVlanMember[port_alias]; + for (const auto& vlan : vlans) + { + string vlan_alias = vlan.first; + removePortFromVlan(port_alias, vlan_alias); + } + SWSS_LOG_NOTICE("Add Vlan Member key: %s", kfvKey(t).c_str()); + if (addHostVlanMember(vlan_id, port_alias, tagging_mode)) + { + key = VLAN_PREFIX + to_string(vlan_id); + key += 
DEFAULT_KEY_SEPARATOR; + key += port_alias; + vector<FieldValueTuple> fvVector = kfvFieldsValues(t); + FieldValueTuple s("dynamic", "yes"); + fvVector.push_back(s); + m_appVlanMemberTableProducer.set(key, fvVector); + + vector<FieldValueTuple> fvVector1; + FieldValueTuple s1("state", "ok"); + fvVector1.push_back(s1); + m_stateVlanMemberTable.set(kfvKey(t), fvVector1); + } + } + else if (op == DEL_COMMAND) + { + if (isVlanMemberStateOk(kfvKey(t))) + { + SWSS_LOG_NOTICE("Remove Vlan Member key: %s", kfvKey(t).c_str()); + removeHostVlanMember(vlan_id, port_alias); + key = VLAN_PREFIX + to_string(vlan_id); + key += DEFAULT_KEY_SEPARATOR; + key += port_alias; + m_appVlanMemberTableProducer.del(key); + m_stateVlanMemberTable.del(kfvKey(t)); + } + + auto vlans = m_PortVlanMember[port_alias]; + for (const auto& vlan : vlans) + { + string vlan_alias = vlan.first; + string tagging_mode = vlan.second; + SWSS_LOG_NOTICE("Add Vlan Member vlan: %s port %s tagging_mode %s", + vlan_alias.c_str(), port_alias.c_str(), tagging_mode.c_str()); + addPortToVlan(port_alias, vlan_alias, tagging_mode); + } + } + /* Other than the case of member port/lag is not ready, no retry will be performed */ + it = consumer.m_toSync.erase(it); + } +} + +void VlanMgr::addPortToVlan(const std::string& membername, const std::string& vlan_alias, + const std::string& tagging_mode) +{ + SWSS_LOG_NOTICE("member %s vlan %s tagging_mode %s", + membername.c_str(), vlan_alias.c_str(), tagging_mode.c_str()); + int vlan_id = stoi(vlan_alias.substr(4)); + if (addHostVlanMember(vlan_id, membername, tagging_mode)) + { + std::string key = VLAN_PREFIX + to_string(vlan_id); + key += DEFAULT_KEY_SEPARATOR; + key += membername; + vector<FieldValueTuple> fvVector; + FieldValueTuple s("tagging_mode", tagging_mode); + fvVector.push_back(s); + FieldValueTuple s1("dynamic", "no"); + fvVector.push_back(s1); + SWSS_LOG_INFO("key: %s\n", key.c_str()); + m_appVlanMemberTableProducer.set(key, fvVector); + + vector<FieldValueTuple> fvVector1; + FieldValueTuple s2("state", "ok"); + fvVector1.push_back(s2); + key = VLAN_PREFIX + to_string(vlan_id); + key += '|'; + key += membername; + m_stateVlanMemberTable.set(key, fvVector1); + } +} + +void VlanMgr::removePortFromVlan(const std::string& membername, const std::string& vlan_alias) +{ + SWSS_LOG_NOTICE("member %s vlan %s", + membername.c_str(), vlan_alias.c_str()); + int vlan_id = stoi(vlan_alias.substr(4)); + std::string key = VLAN_PREFIX + to_string(vlan_id); + key += '|'; + key += membername; + if (isVlanMemberStateOk(key)) + { + key = VLAN_PREFIX + to_string(vlan_id); + key += ':'; + key += membername; + SWSS_LOG_INFO("key: %s\n", key.c_str()); + m_appVlanMemberTableProducer.del(key); + + key = VLAN_PREFIX + to_string(vlan_id); + key += '|'; + key += membername; + m_stateVlanMemberTable.del(key); + } +} + void VlanMgr::doTask(Consumer &consumer) { SWSS_LOG_ENTER(); @@ -701,6 +988,18 @@ { doVlanMemberTask(consumer); } + else if (table_name == STATE_OPER_PORT_TABLE_NAME) + { + doVlanPacPortTask(consumer); + } + else if (table_name == STATE_OPER_FDB_TABLE_NAME) + { + doVlanPacFdbTask(consumer); + } + else if (table_name == STATE_OPER_VLAN_MEMBER_TABLE_NAME) + { + doVlanPacVlanMemberTask(consumer); + } else { SWSS_LOG_ERROR("Unknown config table %s ", table_name.c_str());
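The PAC member handler above removes a port's static VLAN memberships (cached in m_PortVlanMember) before applying a dynamic VLAN, and restores them once the dynamic assignment is deleted. A toy standalone sketch of that save/restore bookkeeping (names mirror the diff, but this is not SONiC code):

```cpp
#include <iostream>
#include <string>
#include <unordered_map>

// port -> (vlan -> tagging_mode), the shape of VlanMgr::m_PortVlanMember
using PortVlanMember =
    std::unordered_map<std::string, std::unordered_map<std::string, std::string>>;

int main()
{
    PortVlanMember members;

    // CONFIG_DB replay: Ethernet4 is a static untagged member of Vlan100.
    members["Ethernet4"]["Vlan100"] = "untagged";

    // PAC SET: strip static memberships before programming the dynamic VLAN.
    for (const auto& [vlan, mode] : members["Ethernet4"])
        std::cout << "remove " << vlan << " (" << mode << ") from Ethernet4\n";

    // PAC DEL: put every cached static membership back.
    for (const auto& [vlan, mode] : members["Ethernet4"])
        std::cout << "restore " << vlan << " mode " << mode << " on Ethernet4\n";
    return 0;
}
```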
diff --git a/cfgmgr/vlanmgr.h b/cfgmgr/vlanmgr.h index 8cf467f41c..7fce59ce65 100644 --- a/cfgmgr/vlanmgr.h +++ b/cfgmgr/vlanmgr.h @@ -14,11 +14,13 @@ namespace swss { class VlanMgr : public Orch { public: - VlanMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, const std::vector<std::string> &tableNames); + VlanMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, const std::vector<std::string> &tableNames, + const std::vector<std::string> &stateTableNames); using Orch::doTask; private: ProducerStateTable m_appVlanTableProducer, m_appVlanMemberTableProducer; + ProducerStateTable m_appFdbTableProducer, m_appPortTableProducer; Table m_cfgVlanTable, m_cfgVlanMemberTable; Table m_statePortTable, m_stateLagTable; Table m_stateVlanTable, m_stateVlanMemberTable; @@ -26,6 +28,7 @@ class VlanMgr : public Orch std::set<std::string> m_vlanReplay; std::set<std::string> m_vlanMemberReplay; bool replayDone; + std::unordered_map<std::string, std::unordered_map<std::string, std::string>> m_PortVlanMember; void doTask(Consumer &consumer); void doVlanTask(Consumer &consumer); @@ -43,6 +46,11 @@ class VlanMgr : public Orch bool isVlanStateOk(const std::string &alias); bool isVlanMacOk(); bool isVlanMemberStateOk(const std::string &vlanMemberKey); + void doVlanPacPortTask(Consumer &consumer); + void doVlanPacFdbTask(Consumer &consumer); + void doVlanPacVlanMemberTask(Consumer &consumer); + void addPortToVlan(const std::string& port_alias, const std::string& vlan_alias, const std::string& tagging_mode); + void removePortFromVlan(const std::string& port_alias, const std::string& vlan_alias); }; } diff --git a/cfgmgr/vlanmgrd.cpp b/cfgmgr/vlanmgrd.cpp index 84bc19cf08..d430063247 100644 --- a/cfgmgr/vlanmgrd.cpp +++ b/cfgmgr/vlanmgrd.cpp @@ -36,7 +36,11 @@ int main(int argc, char **argv) CFG_VLAN_TABLE_NAME, CFG_VLAN_MEMBER_TABLE_NAME, }; - + vector<string> state_vlan_tables = { + STATE_OPER_PORT_TABLE_NAME, + STATE_OPER_FDB_TABLE_NAME, + STATE_OPER_VLAN_MEMBER_TABLE_NAME + }; DBConnector cfgDb("CONFIG_DB", 0); DBConnector appDb("APPL_DB", 0); DBConnector stateDb("STATE_DB", 0); @@ -58,7 +62,7 @@ int main(int argc, char **argv) } gMacAddress = MacAddress(it->second); - VlanMgr vlanmgr(&cfgDb, &appDb, &stateDb, cfg_vlan_tables); + VlanMgr vlanmgr(&cfgDb, &appDb, &stateDb, cfg_vlan_tables, state_vlan_tables); std::vector<Orch *> cfgOrchList = {&vlanmgr}; diff --git a/configure.ac b/configure.ac index e24f69887d..145231749c 100644 --- a/configure.ac +++ b/configure.ac @@ -106,6 +106,7 @@ CFLAGS_COMMON+=" -Wvariadic-macros" CFLAGS_COMMON+=" -Wno-switch-default" CFLAGS_COMMON+=" -Wno-long-long" CFLAGS_COMMON+=" -Wno-redundant-decls" +CFLAGS_COMMON+=" -Wno-error=missing-field-initializers" # Code testing coverage with gcov AC_MSG_CHECKING(whether to build with gcov testing) diff --git a/debian/rules b/debian/rules index 0d7f82646e..7b40977954 100755 --- a/debian/rules +++ b/debian/rules @@ -44,6 +44,7 @@ override_dh_auto_install: ifeq ($(ENABLE_GCOV), y) mkdir -p debian/swss/tmp/gcov lcov -c --directory . 
--no-external --exclude "$(shell pwd)/tests/*" --exclude "$(shell pwd)/**/tests/*" --ignore-errors gcov --output-file coverage.info + lcov --add-tracefile coverage.info -o coverage.info lcov_cobertura coverage.info -o coverage.xml find ./ -type f -regex '.*\.\(h\|cpp\|gcno\|info\)' | tar -cf debian/swss/tmp/gcov/gcov-source.tar -T - endif diff --git a/dev/Dockerfile.yml b/dev/Dockerfile.yml new file mode 100644 index 0000000000..acb0d9054b --- /dev/null +++ b/dev/Dockerfile.yml @@ -0,0 +1,92 @@ +ARG DEBIAN_VERSION="bookworm" +FROM sonicdev-microsoft.azurecr.io:443/sonic-slave-${DEBIAN_VERSION}:latest + +ARG UID=1000 +ARG GID=1000 + +RUN groupadd -g ${GID} sonicdev && \ + useradd -u ${UID} -g ${GID} -ms /bin/bash sonicdev + +RUN mkdir -p /workspace && \ + mkdir -p /workspace/debs && \ + mkdir -p /workspace/tools && \ + chown -R sonicdev:sonicdev /workspace + +ENV PATH="${PATH}:/workspace/tools" + +RUN apt-get update && \ + sudo apt-get install -y \ + libhiredis-dev \ + libzmq3-dev \ + swig4.0 \ + libdbus-1-dev \ + libteam-dev \ + protobuf-compiler \ + libprotobuf-dev && \ + sudo pip3 install lcov_cobertura + +COPY dev/download_artifact.sh /workspace/tools/download_artifact.sh + +WORKDIR /workspace/debs + +ARG BRANCH_NAME="master" +ARG PLATFORM="amd64" +ARG DEBIAN_VERSION + +# SWSS COMMON + +ARG SWSS_COMMON_PROJECT_NAME="Azure.sonic-swss-common" +ARG SWSS_COMMON_ARTIFACT_NAME="sonic-swss-common" +ARG SWSS_COMMON_FILE_PATHS="/libswsscommon_1.0.0_${PLATFORM}.deb /libswsscommon-dev_1.0.0_${PLATFORM}.deb" + +RUN download_artifact.sh "${SWSS_COMMON_PROJECT_NAME}" "${BRANCH_NAME}" "${SWSS_COMMON_ARTIFACT_NAME}" "${SWSS_COMMON_FILE_PATHS}" + +# SAIREDIS + +ARG SAIREDIS_PROJECT_NAME="Azure.sonic-sairedis" +ARG SAIREDIS_ARTIFACT_NAME="sonic-sairedis" +ARG SAIREDIS_FILE_PATHS="\ + /libsaivs_1.0.0_${PLATFORM}.deb \ + /libsaivs-dev_1.0.0_${PLATFORM}.deb \ + /libsairedis_1.0.0_${PLATFORM}.deb \ + /libsairedis-dev_1.0.0_${PLATFORM}.deb \ + /libsaimetadata_1.0.0_${PLATFORM}.deb \ + /libsaimetadata-dev_1.0.0_${PLATFORM}.deb \ + /syncd-vs_1.0.0_${PLATFORM}.deb \ + " + +RUN download_artifact.sh "${SAIREDIS_PROJECT_NAME}" "${BRANCH_NAME}" "${SAIREDIS_ARTIFACT_NAME}" "${SAIREDIS_FILE_PATHS}" + +# COMMON LIB + +ARG COMMON_LIB_PROJECT_NAME="Azure.sonic-buildimage.common_libs" +ARG COMMON_LIB_ARTIFACT_NAME="common-lib" +ARG COMMON_LIB_FILE_PATHS="\ + /target/debs/${DEBIAN_VERSION}/libnl-3-200_3.7.0-0.2%2Bb1sonic1_${PLATFORM}.deb \ + /target/debs/${DEBIAN_VERSION}/libnl-3-dev_3.7.0-0.2%2Bb1sonic1_${PLATFORM}.deb \ + /target/debs/${DEBIAN_VERSION}/libnl-genl-3-200_3.7.0-0.2%2Bb1sonic1_${PLATFORM}.deb \ + /target/debs/${DEBIAN_VERSION}/libnl-genl-3-dev_3.7.0-0.2%2Bb1sonic1_${PLATFORM}.deb \ + /target/debs/${DEBIAN_VERSION}/libnl-route-3-200_3.7.0-0.2%2Bb1sonic1_${PLATFORM}.deb \ + /target/debs/${DEBIAN_VERSION}/libnl-route-3-dev_3.7.0-0.2%2Bb1sonic1_${PLATFORM}.deb \ + /target/debs/${DEBIAN_VERSION}/libnl-nf-3-200_3.7.0-0.2%2Bb1sonic1_${PLATFORM}.deb \ + /target/debs/${DEBIAN_VERSION}/libnl-nf-3-dev_3.7.0-0.2%2Bb1sonic1_${PLATFORM}.deb \ + /target/debs/${DEBIAN_VERSION}/libyang_1.0.73_${PLATFORM}.deb \ + " + +RUN download_artifact.sh "${COMMON_LIB_PROJECT_NAME}" "${BRANCH_NAME}" "${COMMON_LIB_ARTIFACT_NAME}" "${COMMON_LIB_FILE_PATHS}" + +# DASH API + +ARG DASH_API_PROJECT_NAME="sonic-net.sonic-dash-api" +ARG DASH_API_ARTIFACT_NAME="sonic-dash-api" +ARG DASH_API_FILE_PATHS="/libdashapi_1.0.0_${PLATFORM}.deb" + +RUN download_artifact.sh "${DASH_API_PROJECT_NAME}" "${BRANCH_NAME}" "${DASH_API_ARTIFACT_NAME}" 
"${DASH_API_FILE_PATHS}" + +RUN dpkg -i *.deb + +WORKDIR /workspace + +USER sonicdev + +ENTRYPOINT [ "bash" ] diff --git a/dev/docker-compose.yml b/dev/docker-compose.yml new file mode 100644 index 0000000000..ce51eb6781 --- /dev/null +++ b/dev/docker-compose.yml @@ -0,0 +1,18 @@ +services: + sonicdev: + container_name: sonicdev + build: + context: .. + dockerfile: dev/Dockerfile.yml + args: + - DEBIAN_VERSION + - UID + - GID + - BRANCH_NAME + - PLATFORM + volumes: + - ..:/workspace/sonic-swss + init: true + privileged: true + working_dir: /workspace/sonic-swss + diff --git a/dev/download_artifact.sh b/dev/download_artifact.sh new file mode 100755 index 0000000000..282ca50475 --- /dev/null +++ b/dev/download_artifact.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +# download_artifact.sh +# +# targetPaths: space separated list of target paths to download from the artifact +# e.g. +# ./download_artifact.sh "Azure.sonic-swss-common" "master" "sonic-swss-common" "/libswsscommon-dev_1.0.0_amd64.deb /libswsscommon_1.0.0_amd64.deb" + +set -x -e + +pipelineName=${1} +branchName=${2} +artifactName=${3} +targetPaths=${4} + +queryPipelinesUrl="https://dev.azure.com/mssonic/build/_apis/pipelines" + +definitions=$(curl -s "${queryPipelinesUrl}" | jq -r ".value[] | select (.name == \"${pipelineName}\").id") + +queryBuildsUrl="https://dev.azure.com/mssonic/build/_apis/build/builds?definitions=${definitions}&branchName=refs/heads/${branchName}&resultFilter=succeeded&statusFilter=completed&api-version=6.0" + +buildId=$(curl -s ${queryBuildsUrl} | jq -r '.value[0].id') + +queryArtifactUrl="https://dev.azure.com/mssonic/build/_apis/build/builds/${buildId}/artifacts?artifactName=${artifactName}&api-version=6.0" + +function download_artifact { + + target_path=${1} + output_file=$(sed 's/.*\///' <<< ${target_path}) + + download_artifact_url=$(curl -s ${queryArtifactUrl} | jq -r '.resource.downloadUrl') + download_artifact_url=$(sed 's/zip$/file/' <<< ${download_artifact_url}) + download_artifact_url="$download_artifact_url&subPath=${target_path}" + + wget -O ${output_file} ${download_artifact_url} +} + +function download_artifacts { + target_paths_array=(${targetPaths}) + for target_path in "${target_paths_array[@]}" + do + download_artifact ${target_path} + done +} + +download_artifacts diff --git a/fpmsyncd/fpmlink.cpp b/fpmsyncd/fpmlink.cpp index 1ed888d292..d90ea8aed6 100644 --- a/fpmsyncd/fpmlink.cpp +++ b/fpmsyncd/fpmlink.cpp @@ -281,6 +281,11 @@ void FpmLink::processFpmMessage(fpm_msg_hdr_t* hdr) /* EVPN Type5 Add route processing */ processRawMsg(nl_hdr); } + else if(nl_hdr->nlmsg_type == RTM_NEWNEXTHOP || nl_hdr->nlmsg_type == RTM_DELNEXTHOP) + { + /* rtnl api dont support RTM_NEWNEXTHOP/RTM_DELNEXTHOP yet. 
Process them as raw messages. */ + processRawMsg(nl_hdr); + } else { NetDispatcher::getInstance().onNetlinkMessage(msg); } @@ -320,4 +325,4 @@ bool FpmLink::send(nlmsghdr* nl_hdr) } return true; -} +} \ No newline at end of file diff --git a/fpmsyncd/routesync.cpp b/fpmsyncd/routesync.cpp index cea63dc42f..7c2afe5597 100644 --- a/fpmsyncd/routesync.cpp +++ b/fpmsyncd/routesync.cpp @@ -13,6 +13,7 @@ #include "converter.h" #include #include +#include <linux/nexthop.h> using namespace std; using namespace swss; @@ -34,6 +35,11 @@ using namespace swss; ((struct rtattr *)(((char *)(r)) + NLMSG_ALIGN(sizeof(struct ndmsg)))) #endif +#ifndef NHA_RTA +#define NHA_RTA(r) \ + ((struct rtattr *)(((char *)(r)) + NLMSG_ALIGN(sizeof(struct nhmsg)))) +#endif + #define VXLAN_VNI 0 #define VXLAN_RMAC 1 #define NH_ENCAP_VXLAN 100 @@ -106,6 +112,8 @@ enum { ROUTE_ENCAP_SRV6_ENCAP_SRC_ADDR = 2, }; +#define MAX_MULTIPATH_NUM 514 + /* Returns name of the protocol passed number represents */ static string getProtocolString(int proto) { @@ -138,6 +146,7 @@ static decltype(auto) makeNlAddr(const T& ip) RouteSync::RouteSync(RedisPipeline *pipeline) : m_routeTable(pipeline, APP_ROUTE_TABLE_NAME, true), + m_nexthop_groupTable(pipeline, APP_NEXTHOP_GROUP_TABLE_NAME, true), m_label_routeTable(pipeline, APP_LABEL_ROUTE_TABLE_NAME, true), m_vnet_routeTable(pipeline, APP_VNET_RT_TABLE_NAME, true), m_vnet_tunnelTable(pipeline, APP_VNET_RT_TUNNEL_TABLE_NAME, true), @@ -1444,11 +1453,21 @@ void RouteSync::onMsgRaw(struct nlmsghdr *h) if ((h->nlmsg_type != RTM_NEWROUTE) && (h->nlmsg_type != RTM_DELROUTE) && (h->nlmsg_type != RTM_NEWSRV6LOCALSID) - && (h->nlmsg_type != RTM_DELSRV6LOCALSID)) + && (h->nlmsg_type != RTM_DELSRV6LOCALSID) + && (h->nlmsg_type != RTM_NEWNEXTHOP) + && (h->nlmsg_type != RTM_DELNEXTHOP) + ) return; + if(h->nlmsg_type == RTM_NEWNEXTHOP || h->nlmsg_type == RTM_DELNEXTHOP) + { + len = (int)(h->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg))); + } + else + { + len = (int)(h->nlmsg_len - NLMSG_LENGTH(sizeof(struct ndmsg))); + } /* Length validity. */ - len = (int)(h->nlmsg_len - NLMSG_LENGTH(sizeof(struct ndmsg))); if (len < 0) { SWSS_LOG_ERROR("%s: Message received from netlink is of a broken size %d %zu", @@ -1457,6 +1476,12 @@ void RouteSync::onMsgRaw(struct nlmsghdr *h) return; } + if(h->nlmsg_type == RTM_NEWNEXTHOP || h->nlmsg_type == RTM_DELNEXTHOP) + { + onNextHopMsg(h, len); + return; + } + if ((h->nlmsg_type == RTM_NEWSRV6LOCALSID) || (h->nlmsg_type == RTM_DELSRV6LOCALSID)) { @@ -1481,8 +1506,6 @@ void RouteSync::onMsgRaw(struct nlmsghdr *h) onEvpnRouteMsg(h, len); break; } - - return; } void RouteSync::onMsg(int nlmsg_type, struct nl_object *obj) @@ -1637,88 +1660,148 @@ void RouteSync::onRouteMsg(int nlmsg_type, struct nl_object *obj, char *vrf) return; } - struct nl_list_head *nhs = rtnl_route_get_nexthops(route_obj); - if (!nhs) - { - SWSS_LOG_INFO("Nexthop list is empty for %s", destipprefix); - return; - } - - /* Get nexthop lists */ + vector<FieldValueTuple> fvVector; string gw_list; string intf_list; string mpls_list; - getNextHopList(route_obj, gw_list, mpls_list, intf_list); - string weights = getNextHopWt(route_obj); - vector<string> alsv = tokenize(intf_list, NHG_DELIMITER); - for (auto alias : alsv) + string nhg_id_key; + uint32_t nhg_id = rtnl_route_get_nh_id(route_obj); + if(nhg_id) { - /* - * An FRR behavior change from 7.2 to 7.5 makes FRR update default route to eth0 in interface - * up/down events. 
Skipping routes to eth0 or docker0 to avoid such behavior - */ - if (alias == "eth0" || alias == "docker0") - { - SWSS_LOG_DEBUG("Skip routes to eth0 or docker0: %s %s %s", - destipprefix, gw_list.c_str(), intf_list.c_str()); - // If intf_list has only this interface, that means all of the next hops of this route - // have been removed and the next hop on the eth0/docker0 has become the only next hop. - // In this case since we do not want the route with next hop on eth0/docker0, we return. - // But still we need to clear the route from the APPL_DB. Otherwise the APPL_DB and data - // path will be left with stale route entry - if(alsv.size() == 1) + const auto itg = m_nh_groups.find(nhg_id); + if(itg == m_nh_groups.end()) + { + SWSS_LOG_ERROR("NextHop group id %d not found. Dropping the route %s", nhg_id, destipprefix); + return; + } + NextHopGroup& nhg = itg->second; + if(nhg.group.size() == 0) + { + // Using route-table only for single next-hop + string nexthops = nhg.nexthop.empty() ? (rtnl_route_get_family(route_obj) == AF_INET ? "0.0.0.0" : "::") : nhg.nexthop; + string ifnames, weights; + + getNextHopGroupFields(nhg, nexthops, ifnames, weights, rtnl_route_get_family(route_obj)); + + FieldValueTuple gw("nexthop", nexthops.c_str()); + FieldValueTuple intf("ifname", ifnames.c_str()); + fvVector.push_back(gw); + fvVector.push_back(intf); + + SWSS_LOG_DEBUG("NextHop group id %d is a single nexthop address. Filling the route table %s with nexthop and ifname", nhg_id, destipprefix); + } + else + { + nhg_id_key = getNextHopGroupKeyAsString(nhg_id); + FieldValueTuple nhg("nexthop_group", nhg_id_key.c_str()); + fvVector.push_back(nhg); + installNextHopGroup(nhg_id); + } + + auto proto_num = rtnl_route_get_protocol(route_obj); + auto proto_str = getProtocolString(proto_num); + FieldValueTuple proto("protocol", proto_str); + fvVector.push_back(proto); + + } + else + { + struct nl_list_head *nhs = rtnl_route_get_nexthops(route_obj); + if (!nhs) + { + SWSS_LOG_INFO("Nexthop list is empty for %s", destipprefix); + return; + } + + /* Get nexthop lists */ + + getNextHopList(route_obj, gw_list, mpls_list, intf_list); + string weights = getNextHopWt(route_obj); + + vector<string> alsv = tokenize(intf_list, NHG_DELIMITER); + + if (alsv.size() == 1) + { + if (alsv[0] == "eth0" || alsv[0] == "docker0") { + SWSS_LOG_DEBUG("Skip routes to eth0 or docker0: %s %s %s", + destipprefix, gw_list.c_str(), intf_list.c_str()); + if (!warmRestartInProgress) { SWSS_LOG_NOTICE("RouteTable del msg for route with only one nh on eth0/docker0: %s %s %s %s", - destipprefix, gw_list.c_str(), intf_list.c_str(), mpls_list.c_str()); + destipprefix, gw_list.c_str(), intf_list.c_str(), mpls_list.c_str()); m_routeTable.del(destipprefix); } else { SWSS_LOG_NOTICE("Warm-Restart mode: Receiving delete msg for route with only nh on eth0/docker0: %s %s %s %s", - destipprefix, gw_list.c_str(), intf_list.c_str(), mpls_list.c_str()); + destipprefix, gw_list.c_str(), intf_list.c_str(), mpls_list.c_str()); vector<FieldValueTuple> fvVector; const KeyOpFieldsValuesTuple kfv = std::make_tuple(destipprefix, - DEL_COMMAND, - fvVector); + DEL_COMMAND, + fvVector); m_warmStartHelper.insertRefreshMap(kfv); } + return; + } + } + else + { + for (auto alias : alsv) + { + /* + * A change in FRR behavior from version 7.2 to 7.5 causes the default route to be updated to eth0 + * during interface up/down events. This skips routes to eth0 or docker0 to avoid such behavior. 
+ */ + if (alias == "eth0" || alias == "docker0") + { + SWSS_LOG_DEBUG("Skip routes to eth0 or docker0: %s %s %s", + destipprefix, gw_list.c_str(), intf_list.c_str()); + continue; + } } - return; } - } - auto proto_num = rtnl_route_get_protocol(route_obj); - auto proto_str = getProtocolString(proto_num); + auto proto_num = rtnl_route_get_protocol(route_obj); + auto proto_str = getProtocolString(proto_num); - vector<FieldValueTuple> fvVector; - FieldValueTuple proto("protocol", proto_str); - FieldValueTuple gw("nexthop", gw_list); - FieldValueTuple intf("ifname", intf_list); - fvVector.push_back(proto); - fvVector.push_back(gw); - fvVector.push_back(intf); - if (!mpls_list.empty()) - { - FieldValueTuple mpls_nh("mpls_nh", mpls_list); - fvVector.push_back(mpls_nh); - } - if (!weights.empty()) - { - FieldValueTuple wt("weight", weights); - fvVector.push_back(wt); + FieldValueTuple proto("protocol", proto_str); + FieldValueTuple gw("nexthop", gw_list); + FieldValueTuple intf("ifname", intf_list); + + fvVector.push_back(proto); + fvVector.push_back(gw); + fvVector.push_back(intf); + if (!mpls_list.empty()) + { + FieldValueTuple mpls_nh("mpls_nh", mpls_list); + fvVector.push_back(mpls_nh); + } + if (!weights.empty()) + { + FieldValueTuple wt("weight", weights); + fvVector.push_back(wt); + } } if (!warmRestartInProgress) { - m_routeTable.set(destipprefix, fvVector); - SWSS_LOG_DEBUG("RouteTable set msg: %s %s %s %s", destipprefix, + if(nhg_id) + { + m_routeTable.set(destipprefix, fvVector); + SWSS_LOG_INFO("RouteTable set msg: %s %d ", destipprefix, nhg_id); + } + else + { + m_routeTable.set(destipprefix, fvVector); + SWSS_LOG_INFO("RouteTable set msg: %s %s %s %s", destipprefix, gw_list.c_str(), intf_list.c_str(), mpls_list.c_str()); + } } /* @@ -1737,7 +1820,131 @@ void RouteSync::onRouteMsg(int nlmsg_type, struct nl_object *obj, char *vrf) } } -/* +/* + * Handle Nexthop msg + * @arg nlmsghdr Netlink message + */ +void RouteSync::onNextHopMsg(struct nlmsghdr *h, int len) +{ + int nlmsg_type = h->nlmsg_type; + uint32_t id = 0; + unsigned char addr_family; + int32_t ifindex = -1, grp_count = 0; + string ifname; + struct nhmsg *nhm = NULL; + struct rtattr *tb[NHA_MAX + 1] = {}; + struct in_addr ipv4 = {0}; + struct in6_addr ipv6 = {0}; + char gateway[INET6_ADDRSTRLEN] = {0}; + char ifname_unknown[IFNAMSIZ] = "unknown"; + + nhm = (struct nhmsg *)NLMSG_DATA(h); + + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wcast-align" + struct rtattr* rta = NHA_RTA(nhm); + #pragma GCC diagnostic pop + + netlink_parse_rtattr(tb, NHA_MAX, rta, len); + + if (!tb[NHA_ID]) { + SWSS_LOG_ERROR( + "Nexthop group without an ID received from zebra"); + return; + } + + /* We use the ID key'd nhg table for kernel updates */ + id = *((uint32_t *)RTA_DATA(tb[NHA_ID])); + + addr_family = nhm->nh_family; + + if (nlmsg_type == RTM_NEWNEXTHOP) + { + if (tb[NHA_GROUP]) + { + SWSS_LOG_INFO("New nexthop group message!"); + + struct nexthop_grp *nha_grp = (struct nexthop_grp *)RTA_DATA(tb[NHA_GROUP]); + grp_count = (int)(RTA_PAYLOAD(tb[NHA_GROUP]) / sizeof(*nha_grp)); + + if (grp_count > MAX_MULTIPATH_NUM) + { + SWSS_LOG_ERROR("Nexthop group count (%d) exceeds the maximum allowed (%d). 
Clamping to maximum.", grp_count, MAX_MULTIPATH_NUM); + grp_count = MAX_MULTIPATH_NUM; + } + + vector<pair<uint32_t, uint8_t>> group(grp_count); + for (int i = 0; i < grp_count; i++) + { + group[i] = std::make_pair(nha_grp[i].id, nha_grp[i].weight + 1); + } + + auto it = m_nh_groups.find(id); + if (it != m_nh_groups.end()) + { + NextHopGroup &nhg = it->second; + nhg.group = group; + if (nhg.installed) + { + updateNextHopGroupDb(nhg); + } + } + else + { + m_nh_groups.insert({id, NextHopGroup(id, group)}); + } + } + else + { + if (tb[NHA_GATEWAY]) + { + if (addr_family == AF_INET) + { + memcpy(&ipv4, (void *)RTA_DATA(tb[NHA_GATEWAY]), 4); + inet_ntop(AF_INET, &ipv4, gateway, INET_ADDRSTRLEN); + } + else if (addr_family == AF_INET6) + { + memcpy(&ipv6, (void *)RTA_DATA(tb[NHA_GATEWAY]), 16); + inet_ntop(AF_INET6, &ipv6, gateway, INET6_ADDRSTRLEN); + } + else + { + SWSS_LOG_ERROR("Unexpected nexthop address family"); + return; + } + } + + if (tb[NHA_OIF]) + { + ifindex = *((int32_t *)RTA_DATA(tb[NHA_OIF])); + char if_name[IFNAMSIZ] = {0}; + if (!getIfName(ifindex, if_name, IFNAMSIZ)) + { + strcpy(if_name, ifname_unknown); + } + ifname = string(if_name); + if (ifname == "eth0" || ifname == "docker0") + { + SWSS_LOG_DEBUG("Skip routes to interface: %s id[%d]", ifname.c_str(), id); + return; + } + } + + SWSS_LOG_DEBUG("Received: id[%d], if[%d/%s] address[%s]", id, ifindex, ifname.c_str(), gateway); + m_nh_groups.insert({id, NextHopGroup(id, string(gateway), ifname)}); + } + } + else if (nlmsg_type == RTM_DELNEXTHOP) + { + SWSS_LOG_DEBUG("NextHopGroup del event: %d", id); + deleteNextHopGroup(id); + } + + return; +} + +/* + * Handle label route + * @arg nlmsg_type Netlink message type + * @arg obj Netlink object @@ -2404,3 +2611,140 @@ void RouteSync::onWarmStartEnd(DBConnector& applStateDb) SWSS_LOG_NOTICE("Warm-Restart reconciliation processed."); } } + +/* + * Get nexthop group key as string + * @arg id next hop group id + * + * Return nexthop group key + */ +const string RouteSync::getNextHopGroupKeyAsString(uint32_t id) const +{ + return to_string(id); +} + +/* + * update the nexthop group entry + * @arg nh_id nexthop group id + * + */ +void RouteSync::installNextHopGroup(uint32_t nh_id) +{ + auto git = m_nh_groups.find(nh_id); + if(git == m_nh_groups.end()) + { + SWSS_LOG_ERROR("Nexthop not found: %d", nh_id); + return; + } + + NextHopGroup& nhg = git->second; + + if(nhg.installed) + { + //Nexthop group already installed + return; + } + nhg.installed = true; + updateNextHopGroupDb(nhg); +} + +/* + * delete the nexthop group entry + * @arg nh_id nexthop group id + * + */ +void RouteSync::deleteNextHopGroup(uint32_t nh_id) +{ + auto git = m_nh_groups.find(nh_id); + if(git == m_nh_groups.end()) + { + SWSS_LOG_ERROR("Nexthop not found: %d", nh_id); + return; + } + + NextHopGroup& nhg = git->second; + + if(nhg.installed) + { + string key = getNextHopGroupKeyAsString(nh_id); + m_nexthop_groupTable.del(key.c_str()); + SWSS_LOG_DEBUG("NextHopGroup table del: key [%s]", key.c_str()); + } + m_nh_groups.erase(git); +} + +/* + * update the nexthop group table in database + * @arg nhg the nexthop group + * + */ +void RouteSync::updateNextHopGroupDb(const NextHopGroup& nhg) +{ + vector<FieldValueTuple> fvVector; + string nexthops; + string ifnames; + string weights; + string key = getNextHopGroupKeyAsString(nhg.id); + getNextHopGroupFields(nhg, nexthops, ifnames, weights); + + FieldValueTuple nh("nexthop", nexthops.c_str()); + FieldValueTuple ifname("ifname", ifnames.c_str()); + fvVector.push_back(nh); + fvVector.push_back(ifname); + 
if(!weights.empty()) + { + FieldValueTuple wg("weight", weights.c_str()); + fvVector.push_back(wg); + } + SWSS_LOG_INFO("NextHopGroup table set: key [%s] nexthop[%s] ifname[%s] weight[%s]", key.c_str(), nexthops.c_str(), ifnames.c_str(), weights.c_str()); + + m_nexthop_groupTable.set(key.c_str(), fvVector); +} + +/* + * generate the database fields. + * @arg nhg the nexthop group + * + */ +void RouteSync::getNextHopGroupFields(const NextHopGroup& nhg, string& nexthops, string& ifnames, string& weights, uint8_t af /*= AF_INET*/) +{ + if(nhg.group.size() == 0) + { + if(!nhg.nexthop.empty()) + { + nexthops = nhg.nexthop; + } + else + { + nexthops = af == AF_INET ? "0.0.0.0" : "::"; + } + ifnames = nhg.intf; + } + else + { + int i = 0; + for(const auto& nh : nhg.group) + { + uint32_t id = nh.first; + auto itr = m_nh_groups.find(id); + if(itr == m_nh_groups.end()) + { + SWSS_LOG_ERROR("NextHop group is incomplete: %d", nhg.id); + return; + } + + NextHopGroup& nhgr = itr->second; + string weight = to_string(nh.second); + if(i) + { + nexthops += NHG_DELIMITER; + ifnames += NHG_DELIMITER; + weights += NHG_DELIMITER; + } + nexthops += nhgr.nexthop.empty() ? (af == AF_INET ? "0.0.0.0" : "::") : nhgr.nexthop; + ifnames += nhgr.intf; + weights += weight; + ++i; + } + } +} \ No newline at end of file diff --git a/fpmsyncd/routesync.h b/fpmsyncd/routesync.h index fe67f6acfc..fbc042d009 100644 --- a/fpmsyncd/routesync.h +++ b/fpmsyncd/routesync.h @@ -9,6 +9,7 @@ #include "warmRestartHelper.h" #include #include +#include #include @@ -26,6 +27,16 @@ extern void netlink_parse_rtattr(struct rtattr **tb, int max, struct rtattr *rta namespace swss { +struct NextHopGroup { + uint32_t id; + vector<pair<uint32_t, uint8_t>> group; + string nexthop; + string intf; + bool installed; + NextHopGroup(uint32_t id, const string& nexthop, const string& interface) : id(id), nexthop(nexthop), intf(interface), installed(false) {}; + NextHopGroup(uint32_t id, const vector<pair<uint32_t, uint8_t>>& group) : id(id), group(group), installed(false) {}; +}; + /* Path to protocol name database provided by iproute2 */ constexpr auto DefaultRtProtoPath = "/etc/iproute2/rt_protos"; @@ -81,6 +92,9 @@ class RouteSync : public NetMsg ProducerStateTable m_srv6SidListTable; struct nl_cache *m_link_cache; struct nl_sock *m_nl_sock; + /* nexthop group table */ + ProducerStateTable m_nexthop_groupTable; + map<uint32_t, NextHopGroup> m_nh_groups; bool m_isSuppressionEnabled{false}; FpmInterface* m_fpmInterface {nullptr}; @@ -123,7 +137,7 @@ class RouteSync : public NetMsg void onVnetRouteMsg(int nlmsg_type, struct nl_object *obj, string vnet); /* Get interface name based on interface index */ - bool getIfName(int if_index, char *if_name, size_t name_len); + virtual bool getIfName(int if_index, char *if_name, size_t name_len); /* Get interface if_index based on interface name */ rtnl_link* getLinkByName(const char *name); @@ -169,8 +183,17 @@ class RouteSync : public NetMsg uint16_t getEncapType(struct nlmsghdr *h); const char *mySidAction2Str(uint32_t action); + + /* Handle Nexthop message */ + void onNextHopMsg(struct nlmsghdr *h, int len); + /* Get next hop group key */ + const string getNextHopGroupKeyAsString(uint32_t id) const; + void installNextHopGroup(uint32_t nh_id); + void deleteNextHopGroup(uint32_t nh_id); + void updateNextHopGroupDb(const NextHopGroup& nhg); + void getNextHopGroupFields(const NextHopGroup& nhg, string& nexthops, string& ifnames, string& weights, uint8_t af = AF_INET); }; } -#endif +#endif \ No newline at end of file
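One detail of the RTM_NEWNEXTHOP handling above worth calling out: the kernel encodes each group member's weight as weight-1 in struct nexthop_grp, which is why onNextHopMsg() adds 1 before storing the pair in NextHopGroup::group. A standalone sketch of that decode (assumes Linux UAPI headers; the decodeGroup helper is illustrative):

```cpp
#include <linux/nexthop.h>   // struct nexthop_grp
#include <cstdio>
#include <utility>
#include <vector>

// Decode an NHA_GROUP payload into (id, weight) pairs, undoing the kernel's
// weight-1 encoding, matching the vector<pair<uint32_t, uint8_t>> in NextHopGroup.
static std::vector<std::pair<uint32_t, uint8_t>> decodeGroup(const nexthop_grp* grp,
                                                             size_t payload_len)
{
    std::vector<std::pair<uint32_t, uint8_t>> out;
    for (size_t i = 0; i < payload_len / sizeof(nexthop_grp); ++i)
        out.emplace_back(grp[i].id, static_cast<uint8_t>(grp[i].weight + 1));
    return out;
}

int main()
{
    nexthop_grp raw[2] = {};
    raw[0].id = 10; raw[0].weight = 0;   // kernel encoding of weight 1
    raw[1].id = 11; raw[1].weight = 3;   // kernel encoding of weight 4
    for (const auto& [id, w] : decodeGroup(raw, sizeof(raw)))
        std::printf("member id=%u weight=%u\n", id, w);
    return 0;
}
```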
diff --git a/orchagent/Makefile.am b/orchagent/Makefile.am index f94e6d3bfa..acf1011d85 100644 --- a/orchagent/Makefile.am +++ b/orchagent/Makefile.am @@ -138,7 +138,7 @@ orchagent_SOURCES += p4orch/p4orch.cpp \ orchagent_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) orchagent_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) -orchagent_LDADD = $(LDFLAGS_ASAN) -lnl-3 -lnl-route-3 -lpthread -lsairedis -lsaimeta -lsaimetadata -lswsscommon -lzmq -lprotobuf -ldashapi +orchagent_LDADD = $(LDFLAGS_ASAN) -lnl-3 -lnl-route-3 -lpthread -lsairedis -lsaimeta -lsaimetadata -lswsscommon -lzmq -lprotobuf -ldashapi -ljemalloc routeresync_SOURCES = routeresync.cpp routeresync_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) diff --git a/orchagent/aclorch.cpp b/orchagent/aclorch.cpp index c7c81362f6..60ab40dca6 100644 --- a/orchagent/aclorch.cpp +++ b/orchagent/aclorch.cpp @@ -11,6 +11,7 @@ #include "timer.h" #include "crmorch.h" #include "sai_serialize.h" +#include "directory.h" using namespace std; using namespace swss; @@ -30,6 +31,7 @@ extern PortsOrch* gPortsOrch; extern CrmOrch *gCrmOrch; extern SwitchOrch *gSwitchOrch; extern string gMySwitchType; +extern Directory gDirectory; #define MIN_VLAN_ID 1 // 0 is a reserved VLAN ID #define MAX_VLAN_ID 4095 // 4096 is a reserved VLAN ID @@ -617,6 +619,35 @@ bool AclTableRangeMatch::validateAclRuleMatch(const AclRule& rule) const return true; } +void AclRule::TunnelNH::load(const std::string& target) +{ + parse(target); + + VxlanTunnelOrch* vxlan_orch = gDirectory.get<VxlanTunnelOrch*>(); + /* Only the first call creates the SAI object, further calls just increment the ref count */ + oid = vxlan_orch->createNextHopTunnel(tunnel_name, endpoint_ip, mac, vni); +} + +void AclRule::TunnelNH::parse(const std::string& target) +{ + /* Supported Format: endpoint_ip@tunnel_name */ + auto at_pos = target.find('@'); + if (at_pos == std::string::npos) + { + throw std::logic_error("Invalid format for Tunnel Next Hop"); + } + + endpoint_ip = swss::IpAddress(target.substr(0, at_pos)); + tunnel_name = target.substr(at_pos + 1); +} + +void AclRule::TunnelNH::clear() +{ + oid = SAI_NULL_OBJECT_ID; + VxlanTunnelOrch* vxlan_orch = gDirectory.get<VxlanTunnelOrch*>(); + vxlan_orch->removeNextHopTunnel(tunnel_name, endpoint_ip, mac, vni); +} + string AclTableType::getName() const { return m_name; } @@ -1308,7 +1339,17 @@ void AclRule::decreaseNextHopRefCount() } m_redirect_target_next_hop_group.clear(); } - + if (m_redirect_target_tun_nh.oid != SAI_NULL_OBJECT_ID) + { + try + { + m_redirect_target_tun_nh.clear(); + } + catch (const std::runtime_error& e) + { + SWSS_LOG_ERROR("Failed to remove tunnel nh reference %s, ACL Rule: %s", e.what(), m_id.c_str()); + } + } return; } @@ -2001,21 +2042,38 @@ sai_object_id_t AclRulePacket::getRedirectObjectId(const string& redirect_value) try { NextHopKey nh(target); - if (!m_pAclOrch->m_neighOrch->hasNextHop(nh)) + if (m_pAclOrch->m_neighOrch->hasNextHop(nh)) { - SWSS_LOG_ERROR("ACL Redirect action target next hop ip: '%s' doesn't exist on the switch", nh.to_string().c_str()); - return SAI_NULL_OBJECT_ID; + m_redirect_target_next_hop = target; + m_pAclOrch->m_neighOrch->increaseNextHopRefCount(nh); + return m_pAclOrch->m_neighOrch->getNextHopId(nh); } - - m_redirect_target_next_hop = target; - m_pAclOrch->m_neighOrch->increaseNextHopRefCount(nh); - return m_pAclOrch->m_neighOrch->getNextHopId(nh); } catch (...) { // no error, just try next variant } + // Try to parse if this is a tunnel nexthop. + try + { + m_redirect_target_tun_nh.load(target); + if (SAI_NULL_OBJECT_ID != m_redirect_target_tun_nh.oid) + { + SWSS_LOG_INFO("Tunnel Next Hop Found: oid:0x%" PRIx64 ", target: %s", m_redirect_target_tun_nh.oid, target.c_str()); + return m_redirect_target_tun_nh.oid; + } + } + catch (std::logic_error& e) + { + // no error, just try next variant + } + catch (const std::runtime_error& e) + { + SWSS_LOG_ERROR("Failed to create/fetch tunnel next hop, %s, err: %s", target.c_str(), e.what()); + return SAI_NULL_OBJECT_ID; + } + // try to parse nh group the set of <ip address, interface name> try {
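For reference, the redirect-target grammar TunnelNH::parse() accepts above is endpoint_ip@tunnel_name; anything without an '@' falls through to the next-hop-group parser. A standalone approximation of that parse step (not the SONiC class itself; illustrative values in main):

```cpp
#include <iostream>
#include <stdexcept>
#include <string>

struct TunnelTarget
{
    std::string endpoint_ip;   // validated as swss::IpAddress in the real code
    std::string tunnel_name;
};

static TunnelTarget parseTunnelTarget(const std::string& target)
{
    auto at_pos = target.find('@');
    if (at_pos == std::string::npos)
        throw std::logic_error("Invalid format for Tunnel Next Hop");
    return {target.substr(0, at_pos), target.substr(at_pos + 1)};
}

int main()
{
    const auto t = parseTunnelTarget("10.0.0.7@tunnel0");
    std::cout << "endpoint " << t.endpoint_ip << " via " << t.tunnel_name << "\n";
    return 0;
}
```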
diff --git a/orchagent/aclorch.h b/orchagent/aclorch.h index 9e27be2ac0..beb389260e 100644 --- a/orchagent/aclorch.h +++ b/orchagent/aclorch.h @@ -15,6 +15,7 @@ #include "mirrororch.h" #include "dtelorch.h" #include "observer.h" +#include "vxlanorch.h" #include "flex_counter_manager.h" #include "acltable.h" @@ -286,6 +287,22 @@ class AclTable; class AclRule { public: + struct TunnelNH + { + TunnelNH() = default; + ~TunnelNH() = default; + + void load(const std::string& target); + void parse(const std::string& target); + void clear(); + + std::string tunnel_name; + swss::IpAddress endpoint_ip; + swss::MacAddress mac; + uint32_t vni = 0; + sai_object_id_t oid = SAI_NULL_OBJECT_ID; + }; + AclRule(AclOrch *pAclOrch, string rule, string table, bool createCounter = true); virtual bool validateAddPriority(string attr_name, string attr_value); virtual bool validateAddMatch(string attr_name, string attr_value); @@ -359,6 +376,7 @@ class AclRule map m_matches; string m_redirect_target_next_hop; string m_redirect_target_next_hop_group; + AclRule::TunnelNH m_redirect_target_tun_nh; vector m_rangeConfig; vector m_ranges; diff --git a/orchagent/dash/dashorch.cpp b/orchagent/dash/dashorch.cpp index 03bb69be4b..d9aac53ce4 100644 --- a/orchagent/dash/dashorch.cpp +++ b/orchagent/dash/dashorch.cpp @@ -140,11 +140,19 @@ bool DashOrch::addApplianceEntry(const string& appliance_id, const dash::applian } sai_direction_lookup_entry_t direction_lookup_entry; + vector<sai_attribute_t> direction_lookup_attrs; direction_lookup_entry.switch_id = gSwitchId; direction_lookup_entry.vni = entry.vm_vni(); appliance_attr.id = SAI_DIRECTION_LOOKUP_ENTRY_ATTR_ACTION; appliance_attr.value.u32 = SAI_DIRECTION_LOOKUP_ENTRY_ACTION_SET_OUTBOUND_DIRECTION; - status = sai_dash_direction_lookup_api->create_direction_lookup_entry(&direction_lookup_entry, attr_count, &appliance_attr); + direction_lookup_attrs.push_back(appliance_attr); + + appliance_attr.id = SAI_DIRECTION_LOOKUP_ENTRY_ATTR_DASH_ENI_MAC_OVERRIDE_TYPE; + appliance_attr.value.u32 = SAI_DASH_ENI_MAC_OVERRIDE_TYPE_DST_MAC; + direction_lookup_attrs.push_back(appliance_attr); + + status = sai_dash_direction_lookup_api->create_direction_lookup_entry(&direction_lookup_entry, + (uint32_t)direction_lookup_attrs.size(), direction_lookup_attrs.data()); if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to create direction lookup entry for %s", appliance_id.c_str()); diff --git a/orchagent/fdborch.cpp b/orchagent/fdborch.cpp index 5dbaf291f8..98236be7d5 100644 --- a/orchagent/fdborch.cpp +++ b/orchagent/fdborch.cpp @@ -772,6 +772,7 @@ void FdbOrch::doTask(Consumer& consumer) string esi = ""; unsigned int vni = 0; string sticky = ""; + string discard = "false"; for (auto i : kfvFieldsValues(t)) { @@ -784,6 +785,10 @@ { type = fvValue(i); } + if (fvField(i) == "discard") + { + discard = fvValue(i); + } if(origin == FDB_ORIGIN_VXLAN_ADVERTIZED) { @@ -859,6 +864,7 @@ void 
FdbOrch::doTask(Consumer& consumer) fdbData.esi = esi; fdbData.vni = vni; fdbData.is_flush_pending = false; + fdbData.discard = discard; if (addFdbEntry(entry, port, fdbData)) { if (origin == FDB_ORIGIN_MCLAG_ADVERTIZED) @@ -1480,7 +1486,9 @@ bool FdbOrch::addFdbEntry(const FdbEntry& entry, const string& port_name, attrs.push_back(attr); } } - + attr.id = SAI_FDB_ENTRY_ATTR_PACKET_ACTION; + attr.value.s32 = (fdbData.discard == "true") ? SAI_PACKET_ACTION_DROP : SAI_PACKET_ACTION_FORWARD; + attrs.push_back(attr); if (macUpdate) { SWSS_LOG_INFO("MAC-Update FDB %s in %s on from-%s:to-%s from-%s:to-%s origin-%d-to-%d", diff --git a/orchagent/fdborch.h b/orchagent/fdborch.h index a5a5fcd0e0..ef912b8400 100644 --- a/orchagent/fdborch.h +++ b/orchagent/fdborch.h @@ -65,6 +65,7 @@ struct FdbData string esi; unsigned int vni; sai_fdb_entry_type_t sai_fdb_type; + string discard; }; struct SavedFdbEntry diff --git a/orchagent/flexcounterorch.cpp b/orchagent/flexcounterorch.cpp index 2832f0bd12..6fa37f4950 100644 --- a/orchagent/flexcounterorch.cpp +++ b/orchagent/flexcounterorch.cpp @@ -118,6 +118,8 @@ void FlexCounterOrch::doTask(Consumer &consumer) { auto itDelay = std::find(std::begin(data), std::end(data), FieldValueTuple(FLEX_COUNTER_DELAY_STATUS_FIELD, "true")); string poll_interval; + string bulk_chunk_size; + string bulk_chunk_size_per_counter; if (itDelay != data.end()) { @@ -141,6 +143,14 @@ } } } + else if (field == BULK_CHUNK_SIZE_FIELD) + { + bulk_chunk_size = value; + } + else if (field == BULK_CHUNK_SIZE_PER_PREFIX_FIELD) + { + bulk_chunk_size_per_counter = value; + } else if(field == FLEX_COUNTER_STATUS_FIELD) { // Currently, the counters are disabled for polling by default @@ -256,6 +266,19 @@ SWSS_LOG_NOTICE("Unsupported field %s", field.c_str()); } } + + if (!bulk_chunk_size.empty() || !bulk_chunk_size_per_counter.empty()) + { + m_groupsWithBulkChunkSize.insert(key); + setFlexCounterGroupBulkChunkSize(flexCounterGroupMap[key], + bulk_chunk_size.empty() ? "NULL" : bulk_chunk_size, + bulk_chunk_size_per_counter.empty() ? "NULL" : bulk_chunk_size_per_counter); + } + else if (m_groupsWithBulkChunkSize.find(key) != m_groupsWithBulkChunkSize.end()) + { + setFlexCounterGroupBulkChunkSize(flexCounterGroupMap[key], "NULL", "NULL"); + m_groupsWithBulkChunkSize.erase(key); + } } consumer.m_toSync.erase(it++);
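The flex counter logic above follows a set-or-reset pattern: if either bulk field is present, the group is recorded and both values are pushed (substituting "NULL" for the missing one); once both disappear, it resets to "NULL" exactly once and forgets the group. A toy standalone version of that pattern (setGroupBulkChunkSize stands in for the real setFlexCounterGroupBulkChunkSize):

```cpp
#include <iostream>
#include <string>
#include <unordered_set>

static void setGroupBulkChunkSize(const std::string& group,
                                  const std::string& size,
                                  const std::string& perPrefix)
{
    std::cout << group << ": size=" << size << " per-prefix=" << perPrefix << "\n";
}

int main()
{
    std::unordered_set<std::string> groupsWithBulkChunkSize;
    std::string key = "PORT", size = "64", perPrefix = "";

    // Config pass with at least one bulk field present: record and apply.
    if (!size.empty() || !perPrefix.empty())
    {
        groupsWithBulkChunkSize.insert(key);
        setGroupBulkChunkSize(key, size.empty() ? "NULL" : size,
                              perPrefix.empty() ? "NULL" : perPrefix);
    }

    // Next pass: both fields gone, so reset exactly once and forget the group.
    size.clear();
    if (size.empty() && perPrefix.empty() && groupsWithBulkChunkSize.erase(key))
    {
        setGroupBulkChunkSize(key, "NULL", "NULL");
    }
    return 0;
}
```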
"NULL" : bulk_chunk_size_per_counter); + } + else if (m_groupsWithBulkChunkSize.find(key) != m_groupsWithBulkChunkSize.end()) + { + setFlexCounterGroupBulkChunkSize(flexCounterGroupMap[key], "NULL", "NULL"); + m_groupsWithBulkChunkSize.erase(key); + } } consumer.m_toSync.erase(it++); diff --git a/orchagent/flexcounterorch.h b/orchagent/flexcounterorch.h index 4bc74dc3b8..f3aa03e6c0 100644 --- a/orchagent/flexcounterorch.h +++ b/orchagent/flexcounterorch.h @@ -67,6 +67,7 @@ class FlexCounterOrch: public Orch Table m_bufferQueueConfigTable; Table m_bufferPgConfigTable; Table m_deviceMetadataConfigTable; + std::unordered_set m_groupsWithBulkChunkSize; }; #endif diff --git a/orchagent/intfsorch.cpp b/orchagent/intfsorch.cpp index 87c2206e90..a5fa104c9a 100644 --- a/orchagent/intfsorch.cpp +++ b/orchagent/intfsorch.cpp @@ -846,6 +846,19 @@ void IntfsOrch::doTask(Consumer &consumer) m_syncdIntfses[alias] = intfs_entry; m_vrfOrch->increaseVrfRefCount(vrf_id); } + else if (m_syncdIntfses[alias].vrf_id != vrf_id) + { + if (m_syncdIntfses[alias].ip_addresses.size() == 0) + { + m_vrfOrch->decreaseVrfRefCount(m_syncdIntfses[alias].vrf_id); + m_vrfOrch->increaseVrfRefCount(vrf_id); + m_syncdIntfses[alias].vrf_id = vrf_id; + } + else + { + SWSS_LOG_ERROR("Failed to set interface '%s' to VRF ID '%d' because it has IP addresses associated with it.", alias.c_str(), vrf_id); + } + } } else { @@ -1316,8 +1329,21 @@ bool IntfsOrch::removeRouterIntfs(Port &port) return false; } - const auto id = sai_serialize_object_id(port.m_rif_id); - removeRifFromFlexCounter(id, port.m_alias); + bool port_found = false; + for (auto it = m_rifsToAdd.begin(); it != m_rifsToAdd.end(); ++it) + { + if (it->m_rif_id == port.m_rif_id) + { + m_rifsToAdd.erase(it); + port_found = true; + break; + } + } + if (!port_found) + { + const auto id = sai_serialize_object_id(port.m_rif_id); + removeRifFromFlexCounter(id, port.m_alias); + } sai_status_t status = sai_router_intfs_api->remove_router_interface(port.m_rif_id); if (status != SAI_STATUS_SUCCESS) @@ -1746,4 +1772,5 @@ void IntfsOrch::voqSyncIntfState(string &alias, bool isUp) m_tableVoqSystemInterfaceTable->hset(port_alias, "oper_status", isUp ? 
"up" : "down"); } -} \ No newline at end of file +} + diff --git a/orchagent/macsecorch.cpp b/orchagent/macsecorch.cpp index bedf4fd882..1efd16a023 100644 --- a/orchagent/macsecorch.cpp +++ b/orchagent/macsecorch.cpp @@ -38,6 +38,7 @@ extern sai_switch_api_t *sai_switch_api; constexpr bool DEFAULT_ENABLE_ENCRYPT = true; constexpr bool DEFAULT_SCI_IN_SECTAG = false; constexpr sai_macsec_cipher_suite_t DEFAULT_CIPHER_SUITE = SAI_MACSEC_CIPHER_SUITE_GCM_AES_128; +bool saiAclFieldSciMatchSupported = true; static const std::vector macsec_sa_attrs = { @@ -637,6 +638,17 @@ MACsecOrch::MACsecOrch( MACSEC_STAT_POLLING_INTERVAL_MS, true) { SWSS_LOG_ENTER(); + sai_attr_capability_t capability; + if (sai_query_attribute_capability(gSwitchId, SAI_OBJECT_TYPE_ACL_TABLE, + SAI_ACL_TABLE_ATTR_FIELD_MACSEC_SCI, + &capability) == SAI_STATUS_SUCCESS) + { + if (capability.create_implemented == false) + { + SWSS_LOG_DEBUG("SAI_ACL_TABLE_ATTR_FIELD_MACSEC_SCI is not supported"); + saiAclFieldSciMatchSupported = false; + } + } } MACsecOrch::~MACsecOrch() @@ -2570,9 +2582,12 @@ bool MACsecOrch::createMACsecACLTable( attr.value.booldata = true; attrs.push_back(attr); - attr.id = SAI_ACL_TABLE_ATTR_FIELD_MACSEC_SCI; - attr.value.booldata = sci_in_sectag; - attrs.push_back(attr); + if (saiAclFieldSciMatchSupported == true) + { + attr.id = SAI_ACL_TABLE_ATTR_FIELD_MACSEC_SCI; + attr.value.booldata = sci_in_sectag; + attrs.push_back(attr); + } sai_status_t status = sai_acl_api->create_acl_table( &table_id, @@ -2738,7 +2753,7 @@ bool MACsecOrch::createMACsecACLDataEntry( attr.value.aclaction.parameter.s32 = SAI_PACKET_ACTION_DROP; attr.value.aclaction.enable = true; attrs.push_back(attr); - if (sci_in_sectag) + if ((saiAclFieldSciMatchSupported == true) && sci_in_sectag) { attr.id = SAI_ACL_ENTRY_ATTR_FIELD_MACSEC_SCI; attr.value.aclfield.enable = true; diff --git a/orchagent/main.cpp b/orchagent/main.cpp index d50508f6ce..5ab63733a5 100644 --- a/orchagent/main.cpp +++ b/orchagent/main.cpp @@ -62,6 +62,9 @@ extern bool gIsNatSupported; #define SWSS_RECORD_ENABLE (0x1 << 1) #define RESPONSE_PUBLISHER_RECORD_ENABLE (0x1 << 2) +/* orchagent heart beat message interval */ +#define HEART_BEAT_INTERVAL_MSECS_DEFAULT 10 * 1000 + string gMySwitchType = ""; int32_t gVoqMySwitchId = -1; int32_t gVoqMaxCores = 0; @@ -73,7 +76,7 @@ uint32_t create_switch_timeout = 0; void usage() { - cout << "usage: orchagent [-h] [-r record_type] [-d record_location] [-f swss_rec_filename] [-j sairedis_rec_filename] [-b batch_size] [-m MAC] [-i INST_ID] [-s] [-z mode] [-k bulk_size] [-q zmq_server_address] [-c mode] [-t create_switch_timeout] [-v VRF]" << endl; + cout << "usage: orchagent [-h] [-r record_type] [-d record_location] [-f swss_rec_filename] [-j sairedis_rec_filename] [-b batch_size] [-m MAC] [-i INST_ID] [-s] [-z mode] [-k bulk_size] [-q zmq_server_address] [-c mode] [-t create_switch_timeout] [-v VRF] [-I heart_beat_interval]" << endl; cout << " -h: display this message" << endl; cout << " -r record_type: record orchagent logs with type (default 3)" << endl; cout << " Bit 0: sairedis.rec, Bit 1: swss.rec, Bit 2: responsepublisher.rec. 
For example:" << endl; @@ -95,6 +98,7 @@ void usage() cout << " -c counter mode (traditional|asic_db), default: asic_db" << endl; cout << " -t Override create switch timeout, in sec" << endl; cout << " -v vrf: VRF name (default empty)" << endl; + cout << " -I heart_beat_interval: Heart beat interval in millisecond (default 10)" << endl; } void sighup_handler(int signo) @@ -349,8 +353,9 @@ int main(int argc, char **argv) bool enable_zmq = false; string responsepublisher_rec_filename = Recorder::RESPPUB_FNAME; int record_type = 3; // Only swss and sairedis recordings enabled by default. + long heartBeatInterval = HEART_BEAT_INTERVAL_MSECS_DEFAULT; - while ((opt = getopt(argc, argv, "b:m:r:f:j:d:i:hsz:k:q:c:t:v:")) != -1) + while ((opt = getopt(argc, argv, "b:m:r:f:j:d:i:hsz:k:q:c:t:v:I:")) != -1) { switch (opt) { @@ -450,6 +455,22 @@ int main(int argc, char **argv) vrf = optarg; } break; + case 'I': + if (optarg) + { + auto interval = atoi(optarg); + if (interval >= 0) + { + heartBeatInterval = interval; + SWSS_LOG_NOTICE("Setting heartbeat interval as %ld", heartBeatInterval); + } + else + { + heartBeatInterval = HEART_BEAT_INTERVAL_MSECS_DEFAULT; + SWSS_LOG_ERROR("Invalid input for heartbeat interval: %d. use default interval: %ld", interval, heartBeatInterval); + } + } + break; default: /* '?' */ exit(EXIT_FAILURE); } @@ -815,7 +836,7 @@ int main(int argc, char **argv) syncd_apply_view(); } - orchDaemon->start(); + orchDaemon->start(heartBeatInterval); return 0; } diff --git a/orchagent/nexthopgroupkey.h b/orchagent/nexthopgroupkey.h index d012cbe41a..4580beaf5f 100644 --- a/orchagent/nexthopgroupkey.h +++ b/orchagent/nexthopgroupkey.h @@ -13,6 +13,7 @@ class NextHopGroupKey { m_overlay_nexthops = false; m_srv6_nexthops = false; + m_srv6_vpn = false; auto nhv = tokenize(nexthops, NHG_DELIMITER); for (const auto &nh : nhv) { @@ -27,6 +28,7 @@ class NextHopGroupKey { m_overlay_nexthops = true; m_srv6_nexthops = false; + m_srv6_vpn = false; auto nhv = tokenize(nexthops, NHG_DELIMITER); for (const auto &nh_str : nhv) { @@ -38,11 +40,16 @@ class NextHopGroupKey { m_overlay_nexthops = false; m_srv6_nexthops = true; + m_srv6_vpn = false; auto nhv = tokenize(nexthops, NHG_DELIMITER); for (const auto &nh_str : nhv) { auto nh = NextHopKey(nh_str, overlay_nh, srv6_nh); m_nexthops.insert(nh); + if (nh.isSrv6Vpn()) + { + m_srv6_vpn = true; + } } } } @@ -51,6 +58,7 @@ class NextHopGroupKey { m_overlay_nexthops = false; m_srv6_nexthops = false; + m_srv6_vpn = false; std::vector nhv = tokenize(nexthops, NHG_DELIMITER); std::vector wtv = tokenize(weights, NHG_DELIMITER); bool set_weight = wtv.size() == nhv.size(); @@ -221,6 +229,11 @@ class NextHopGroupKey return m_srv6_nexthops; } + inline bool is_srv6_vpn() const + { + return m_srv6_vpn; + } + void clear() { m_nexthops.clear(); @@ -228,8 +241,9 @@ class NextHopGroupKey private: std::set m_nexthops; - bool m_overlay_nexthops; - bool m_srv6_nexthops; + bool m_overlay_nexthops = false; + bool m_srv6_nexthops = false; + bool m_srv6_vpn = false; }; #endif /* SWSS_NEXTHOPGROUPKEY_H */ diff --git a/orchagent/nexthopkey.h b/orchagent/nexthopkey.h index 2f03e9fd49..2c5f913ef9 100644 --- a/orchagent/nexthopkey.h +++ b/orchagent/nexthopkey.h @@ -22,6 +22,7 @@ struct NextHopKey uint32_t weight; // NH weight for NHGs string srv6_segment; // SRV6 segment string string srv6_source; // SRV6 source address + string srv6_vpn_sid; // SRV6 vpn sid NextHopKey() : weight(0) {} NextHopKey(const std::string &str, const std::string &alias) : @@ -76,7 +77,7 @@ struct NextHopKey 
vni = 0; weight = 0; auto keys = tokenize(str, NH_DELIMITER); - if (keys.size() != 3) + if (keys.size() != 4) { std::string err = "Error converting " + str + " to Nexthop"; throw std::invalid_argument(err); @@ -84,6 +85,7 @@ struct NextHopKey ip_address = keys[0]; srv6_segment = keys[1]; srv6_source = keys[2]; + srv6_vpn_sid = keys[3]; } else { @@ -115,7 +117,8 @@ struct NextHopKey { if (srv6_nh) { - return ip_address.to_string() + NH_DELIMITER + srv6_segment + NH_DELIMITER + srv6_source; + return ip_address.to_string() + NH_DELIMITER + srv6_segment + NH_DELIMITER + srv6_source + NH_DELIMITER + + srv6_vpn_sid + NH_DELIMITER; } std::string str = formatMplsNextHop(); str += (ip_address.to_string() + NH_DELIMITER + alias + NH_DELIMITER + @@ -125,8 +128,8 @@ struct NextHopKey bool operator<(const NextHopKey &o) const { - return tie(ip_address, alias, label_stack, vni, mac_address, srv6_segment, srv6_source) < - tie(o.ip_address, o.alias, o.label_stack, o.vni, o.mac_address, o.srv6_segment, o.srv6_source); + return tie(ip_address, alias, label_stack, vni, mac_address, srv6_segment, srv6_source, srv6_vpn_sid) < + tie(o.ip_address, o.alias, o.label_stack, o.vni, o.mac_address, o.srv6_segment, o.srv6_source, o.srv6_vpn_sid); } bool operator==(const NextHopKey &o) const @@ -134,7 +137,8 @@ struct NextHopKey return (ip_address == o.ip_address) && (alias == o.alias) && (label_stack == o.label_stack) && (vni == o.vni) && (mac_address == o.mac_address) && - (srv6_segment == o.srv6_segment) && (srv6_source == o.srv6_source); + (srv6_segment == o.srv6_segment) && (srv6_source == o.srv6_source) && + (srv6_vpn_sid == o.srv6_vpn_sid); } bool operator!=(const NextHopKey &o) const @@ -154,7 +158,12 @@ struct NextHopKey bool isSrv6NextHop() const { - return (srv6_segment != ""); + return ((srv6_segment != "") || (srv6_vpn_sid != "") || (srv6_source != "")); + } + + bool isSrv6Vpn() const + { + return (srv6_vpn_sid != ""); } std::string parseMplsNextHop(const std::string& str) diff --git a/orchagent/nhgorch.cpp b/orchagent/nhgorch.cpp index b9cf2832cb..2f2678e741 100644 --- a/orchagent/nhgorch.cpp +++ b/orchagent/nhgorch.cpp @@ -2,6 +2,7 @@ #include "neighorch.h" #include "crmorch.h" #include "routeorch.h" +#include "srv6orch.h" #include "bulker.h" #include "logger.h" #include "swssnet.h" @@ -12,6 +13,7 @@ extern IntfsOrch *gIntfsOrch; extern NeighOrch *gNeighOrch; extern RouteOrch *gRouteOrch; extern NhgOrch *gNhgOrch; +extern Srv6Orch *gSrv6Orch; extern size_t gMaxBulkSize; @@ -61,6 +63,9 @@ void NhgOrch::doTask(Consumer& consumer) string mpls_nhs; string nhgs; bool is_recursive = false; + string srv6_source; + bool overlay_nh = false; + bool srv6_nh = false; /* Get group's next hop IPs and aliases */ for (auto i : kfvFieldsValues(t)) @@ -77,6 +82,12 @@ void NhgOrch::doTask(Consumer& consumer) if (fvField(i) == "mpls_nh") mpls_nhs = fvValue(i); + if (fvField(i) == "seg_src") + { + srv6_source = fvValue(i); + srv6_nh = true; + } + if (fvField(i) == "nexthop_group") { nhgs = fvValue(i); @@ -96,9 +107,11 @@ void NhgOrch::doTask(Consumer& consumer) vector alsv = tokenize(aliases, ','); vector mpls_nhv = tokenize(mpls_nhs, ','); vector nhgv = tokenize(nhgs, NHG_DELIMITER); + vector srv6_srcv = tokenize(srv6_source, ','); /* Create the next hop group key. 
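The SRv6 nexthop string now carries four delimiter-separated fields (ip, segment, source, vpn_sid), and to_string() emits a trailing delimiter. A self-contained sketch of the round trip, assuming '@' for NH_DELIMITER (the delimiter's value is not shown in this hunk); note that the trailing delimiter still tokenizes into exactly four fields with a getline-style split:

```cpp
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>

// Toy model of the 4-field SRv6 nexthop string; names are illustrative only.
struct Srv6Nh
{
    std::string ip, segment, source, vpn_sid;

    std::string to_string(char delim = '@') const
    {
        // Trailing delimiter, matching the patched NextHopKey::to_string()
        return ip + delim + segment + delim + source + delim + vpn_sid + delim;
    }

    static Srv6Nh parse(const std::string &str, char delim = '@')
    {
        std::vector<std::string> keys;
        std::istringstream ss(str);
        std::string tok;
        while (std::getline(ss, tok, delim))
        {
            keys.push_back(tok);
        }
        if (keys.size() != 4)
        {
            throw std::invalid_argument("Error converting " + str + " to Nexthop");
        }
        return {keys[0], keys[1], keys[2], keys[3]};
    }
};
```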
*/ string nhg_str; + NextHopGroupKey nhg_key; /* Keeps track of any non-existing member of a recursive nexthop group */ bool non_existent_member = false; @@ -154,28 +167,77 @@ void NhgOrch::doTask(Consumer& consumer) /* Form nexthopgroup key with the nexthopgroup keys of available members */ nhgv = tokenize(nhgs, NHG_DELIMITER); + bool nhg_mismatch = false; for (uint32_t i = 0; i < nhgv.size(); i++) { - if (i) nhg_str += NHG_DELIMITER; + auto k = m_syncdNextHopGroups.at(nhgv[i]).nhg->getKey(); + if (i) + { + if (k.is_srv6_nexthop() != srv6_nh || k.is_overlay_nexthop() != overlay_nh) + { + SWSS_LOG_ERROR("Inconsistent nexthop group type between %s and %s", + m_syncdNextHopGroups.at(nhgv[0]).nhg->getKey().to_string().c_str(), + k.to_string().c_str()); + nhg_mismatch = true; + break; + } + nhg_str += NHG_DELIMITER; + } + else + { + srv6_nh = k.is_srv6_nexthop(); + overlay_nh = k.is_overlay_nexthop(); + } nhg_str += m_syncdNextHopGroups.at(nhgv[i]).nhg->getKey().to_string(); } + + if (nhg_mismatch) + { + it = consumer.m_toSync.erase(it); + continue; + } + + if (srv6_nh) + nhg_key = NextHopGroupKey(nhg_str, overlay_nh, srv6_nh); + else + nhg_key = NextHopGroupKey(nhg_str, weights); } else { - for (uint32_t i = 0; i < ipv.size(); i++) + if (srv6_nh) { - if (i) nhg_str += NHG_DELIMITER; - if (!mpls_nhv.empty() && mpls_nhv[i] != "na") + if (ipv.size() != srv6_srcv.size()) + { + SWSS_LOG_ERROR("inconsistent number of endpoints and srv6_srcs."); + it = consumer.m_toSync.erase(it); + continue; + } + for (uint32_t i = 0; i < ipv.size(); i++) { - nhg_str += mpls_nhv[i] + LABELSTACK_DELIMITER; + if (i) nhg_str += NHG_DELIMITER; + nhg_str += ipv[i] + NH_DELIMITER; // ip address + nhg_str += NH_DELIMITER; // srv6 segment + nhg_str += srv6_srcv[i] + NH_DELIMITER; // srv6 source + nhg_str += NH_DELIMITER; // srv6 vpn sid + } + nhg_key = NextHopGroupKey(nhg_str, overlay_nh, srv6_nh); + } + else + { + for (uint32_t i = 0; i < ipv.size(); i++) + { + if (i) nhg_str += NHG_DELIMITER; + if (!mpls_nhv.empty() && mpls_nhv[i] != "na") + { + nhg_str += mpls_nhv[i] + LABELSTACK_DELIMITER; + } + nhg_str += ipv[i] + NH_DELIMITER + alsv[i]; } - nhg_str += ipv[i] + NH_DELIMITER + alsv[i]; + nhg_key = NextHopGroupKey(nhg_str, weights); } } - NextHopGroupKey nhg_key = NextHopGroupKey(nhg_str, weights); - /* If the group does not exist, create one. 
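The recursive-group path above derives srv6_nh and overlay_nh from the first member group and rejects mixed groups. Reduced to a compilable shape (GroupTypeFlags is an illustrative type, not the orchagent one):

```cpp
#include <cstddef>
#include <vector>

// Every group in a recursive NHG must agree with the first group's
// SRv6/overlay classification; a mismatch drops the request.
struct GroupTypeFlags
{
    bool srv6;
    bool overlay;
};

bool membersAreConsistent(const std::vector<GroupTypeFlags> &groups,
                          bool &srv6_out, bool &overlay_out)
{
    for (std::size_t i = 0; i < groups.size(); ++i)
    {
        if (i == 0)
        {
            srv6_out = groups[0].srv6;      // first member fixes the type
            overlay_out = groups[0].overlay;
        }
        else if (groups[i].srv6 != srv6_out || groups[i].overlay != overlay_out)
        {
            return false;                   // mismatch: erase the task
        }
    }
    return true;
}
```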
*/ if (nhg_it == m_syncdNextHopGroups.end()) { @@ -192,6 +254,13 @@ void NhgOrch::doTask(Consumer& consumer) { SWSS_LOG_DEBUG("Next hop group count reached its limit."); + // don't create temp nhg for srv6 + if (nhg_key.is_srv6_nexthop()) + { + ++it; + continue; + } + try { auto nhg = std::make_unique(createTempNhg(nhg_key)); @@ -476,6 +545,14 @@ sai_object_id_t NextHopGroupMember::getNhId() const else if (gNeighOrch->hasNextHop(m_key)) { nh_id = gNeighOrch->getNextHopId(m_key); + if (m_key.isSrv6NextHop()) + { + SWSS_LOG_INFO("Single NH: create srv6 nexthop %s", m_key.to_string(false, true).c_str()); + if (!gSrv6Orch->createSrv6NexthopWithoutVpn(m_key, nh_id)) + { + SWSS_LOG_ERROR("Failed to create SRv6 nexthop %s", m_key.to_string(false, true).c_str()); + } + } } /* * If the next hop is labeled and the IP next hop exists, create the @@ -494,7 +571,20 @@ sai_object_id_t NextHopGroupMember::getNhId() const } else { - gNeighOrch->resolveNeighbor(m_key); + if (m_key.isSrv6NextHop()) + { + SWSS_LOG_INFO("Single NH: create srv6 nexthop %s", m_key.to_string(false, true).c_str()); + if (!gSrv6Orch->createSrv6NexthopWithoutVpn(m_key, nh_id)) + { + SWSS_LOG_ERROR("Failed to create SRv6 nexthop %s", m_key.to_string(false, true).c_str()); + } + } + else + { + SWSS_LOG_INFO("Failed to get next hop %s, resolving neighbor", + m_key.to_string().c_str()); + gNeighOrch->resolveNeighbor(m_key); + } } return nh_id; @@ -570,6 +660,14 @@ NextHopGroupMember::~NextHopGroupMember() { SWSS_LOG_ENTER(); + if (m_key.isSrv6NextHop() && gNeighOrch->hasNextHop(m_key) && + !gNeighOrch->getNextHopRefCount(m_key)) + { + if (!gSrv6Orch->removeSrv6NexthopWithoutVpn(m_key)) + { + SWSS_LOG_ERROR("SRv6 Nexthop %s delete failed", m_key.to_string(false, true).c_str()); + } + } /* * If the labeled next hop is unreferenced, remove it from NeighOrch as * NhgOrch and RouteOrch are the ones controlling it's lifetime. They both @@ -577,7 +675,7 @@ NextHopGroupMember::~NextHopGroupMember() * them as they're both doing the same checks before removing a labeled * next hop. */ - if (isLabeled() && + else if (isLabeled() && gNeighOrch->hasNextHop(m_key) && (gNeighOrch->getNextHopRefCount(m_key) == 0)) { @@ -824,6 +922,7 @@ bool NextHopGroup::syncMembers(const std::set& nh_keys) */ std::map syncingMembers; + bool success = true; for (const auto& nh_key : nh_keys) { NextHopGroupMember& nhgm = m_members.at(nh_key); @@ -841,7 +940,8 @@ bool NextHopGroup::syncMembers(const std::set& nh_keys) { SWSS_LOG_WARN("Failed to get next hop %s in group %s", nhgm.to_string().c_str(), to_string().c_str()); - return false; + success = false; + continue; } /* If the neighbor's interface is down, skip from being syncd. */ @@ -868,7 +968,6 @@ bool NextHopGroup::syncMembers(const std::set& nh_keys) * Go through the synced members and increment the Crm ref count for the * successful ones. */ - bool success = true; for (const auto& mbr : syncingMembers) { /* Check that the returned member ID is valid. */ @@ -941,7 +1040,7 @@ bool NextHopGroup::update(const NextHopGroupKey& nhg_key) /* If the member is updated, update it's weight. 
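The destructor change above gates SRv6 teardown on both existence and a zero reference count before falling through to the labeled-nexthop case. The guard order, as a self-contained sketch with illustrative stand-ins (not the orchagent APIs):

```cpp
#include <functional>

// NhState mirrors what gNeighOrch->hasNextHop()/getNextHopRefCount() report.
struct NhState
{
    bool exists = false;
    int  ref_count = 0;
};

// Tear down the SRv6 nexthop only when it is still known and unreferenced.
bool maybeRemoveSrv6(const NhState &nh, bool is_srv6_nh,
                     const std::function<bool()> &remove_srv6)
{
    if (is_srv6_nh && nh.exists && nh.ref_count == 0)
    {
        return remove_srv6(); // false corresponds to the logged delete failure
    }
    return true; // still referenced, or not an SRv6 nexthop: nothing to do
}
```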
*/ else { - if (!mbr_it.second.updateWeight(new_nh_key_it->weight)) + if (new_nh_key_it->weight && mbr_it.second.getWeight() != new_nh_key_it->weight && !mbr_it.second.updateWeight(new_nh_key_it->weight)) { SWSS_LOG_WARN("Failed to update member %s weight", nh_key.to_string().c_str()); return false; diff --git a/orchagent/orch.cpp b/orchagent/orch.cpp index 6c6b2afa78..edcda386bd 100644 --- a/orchagent/orch.cpp +++ b/orchagent/orch.cpp @@ -30,6 +30,20 @@ Orch::Orch(DBConnector *db, const vector &tableNames) } } +Orch::Orch(swss::DBConnector *db1, swss::DBConnector *db2, + const std::vector &tableNames_1, const std::vector &tableNames_2) +{ + for(auto it : tableNames_1) + { + addConsumer(db1, it, default_orch_pri); + } + + for(auto it : tableNames_2) + { + addConsumer(db2, it, default_orch_pri); + } +} + Orch::Orch(DBConnector *db, const vector &tableNames_with_pri) { for (const auto& it : tableNames_with_pri) diff --git a/orchagent/orch.h b/orchagent/orch.h index 86b4a64c43..f956b7e139 100644 --- a/orchagent/orch.h +++ b/orchagent/orch.h @@ -221,6 +221,8 @@ class Orch public: Orch(swss::DBConnector *db, const std::string tableName, int pri = default_orch_pri); Orch(swss::DBConnector *db, const std::vector &tableNames); + Orch(swss::DBConnector *db1, swss::DBConnector *db2, + const std::vector &tableNames_1, const std::vector &tableNames_2); Orch(swss::DBConnector *db, const std::vector &tableNameWithPri); Orch(const std::vector& tables); virtual ~Orch() = default; diff --git a/orchagent/orchdaemon.cpp b/orchagent/orchdaemon.cpp index 7863eb0464..9853078244 100644 --- a/orchagent/orchdaemon.cpp +++ b/orchagent/orchdaemon.cpp @@ -23,9 +23,6 @@ using namespace swss; #define APP_FABRIC_MONITOR_PORT_TABLE_NAME "FABRIC_PORT_TABLE" #define APP_FABRIC_MONITOR_DATA_TABLE_NAME "FABRIC_MONITOR_TABLE" -/* orchagent heart beat message interval */ -#define HEART_BEAT_INTERVAL_MSECS 10 * 1000 - extern sai_switch_api_t* sai_switch_api; extern sai_object_id_t gSwitchId; extern string gMySwitchType; @@ -222,7 +219,8 @@ bool OrchDaemon::init() vector srv6_tables = { APP_SRV6_SID_LIST_TABLE_NAME, - APP_SRV6_MY_SID_TABLE_NAME + APP_SRV6_MY_SID_TABLE_NAME, + APP_PIC_CONTEXT_TABLE_NAME }; gSrv6Orch = new Srv6Orch(m_applDb, srv6_tables, gSwitchOrch, vrf_orch, gNeighOrch); gDirectory.set(gSrv6Orch); @@ -827,7 +825,7 @@ void OrchDaemon::logRotate() { } -void OrchDaemon::start() +void OrchDaemon::start(long heartBeatInterval) { SWSS_LOG_ENTER(); @@ -848,7 +846,7 @@ void OrchDaemon::start() ret = m_select->select(&s, SELECT_TIMEOUT); auto tend = std::chrono::high_resolution_clock::now(); - heartBeat(tend); + heartBeat(tend, heartBeatInterval); auto diff = std::chrono::duration_cast(tend - tstart); @@ -925,7 +923,7 @@ void OrchDaemon::start() flush(); SWSS_LOG_WARN("Orchagent is frozen for warm restart!"); - freezeAndHeartBeat(UINT_MAX); + freezeAndHeartBeat(UINT_MAX, heartBeatInterval); } } } @@ -1093,11 +1091,17 @@ void OrchDaemon::addOrchList(Orch *o) m_orchList.push_back(o); } -void OrchDaemon::heartBeat(std::chrono::time_point tcurrent) +void OrchDaemon::heartBeat(std::chrono::time_point tcurrent, long interval) { + if (interval == 0) + { + // disable heart beat feature when interval is 0 + return; + } + // output heart beat message to SYSLOG auto diff = std::chrono::duration_cast(tcurrent - m_lastHeartBeat); - if (diff.count() >= HEART_BEAT_INTERVAL_MSECS) + if (diff.count() >= interval) { m_lastHeartBeat = tcurrent; // output heart beat message to supervisord with 'PROCESS_COMMUNICATION_STDOUT' event: 
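The reworked heartBeat() treats interval 0 as "disabled" and otherwise emits at most one message per elapsed interval. A compilable model of just that gating (shouldBeat is a hypothetical name):

```cpp
#include <chrono>

using Clock = std::chrono::high_resolution_clock;

// Interval 0 disables the feature; otherwise a beat fires once the elapsed
// time reaches the interval, and the last-beat timestamp is reset, as
// m_lastHeartBeat is in the patch.
bool shouldBeat(Clock::time_point now, Clock::time_point &last, long interval_ms)
{
    if (interval_ms == 0)
    {
        return false; // heartbeat disabled
    }
    auto diff = std::chrono::duration_cast<std::chrono::milliseconds>(now - last);
    if (diff.count() >= interval_ms)
    {
        last = now;
        return true;
    }
    return false;
}
```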
http://supervisord.org/events.html @@ -1105,13 +1109,13 @@ void OrchDaemon::heartBeat(std::chrono::time_point<std::chrono::high_resolution_clock> tcurrent, long interval) -void OrchDaemon::freezeAndHeartBeat(unsigned int duration) +void OrchDaemon::freezeAndHeartBeat(unsigned int duration, long interval) { while (duration > 0) { // Send heartbeat message to prevent Orchagent stuck alert. auto tend = std::chrono::high_resolution_clock::now(); - heartBeat(tend); + heartBeat(tend, interval); duration--; sleep(1); diff --git a/orchagent/orchdaemon.h b/orchagent/orchdaemon.h index 6a1c0b999a..e1e954d51d 100644 --- a/orchagent/orchdaemon.h +++ b/orchagent/orchdaemon.h @@ -63,7 +63,7 @@ class OrchDaemon ~OrchDaemon(); virtual bool init(); - void start(); + void start(long heartBeatInterval); bool warmRestoreAndSyncUp(); void getTaskToSync(vector &ts); bool warmRestoreValidation(); @@ -102,9 +102,9 @@ class OrchDaemon void flush(); - void heartBeat(std::chrono::time_point tcurrent); + void heartBeat(std::chrono::time_point tcurrent, long interval); - void freezeAndHeartBeat(unsigned int duration); + void freezeAndHeartBeat(unsigned int duration, long interval); }; class FabricOrchDaemon : public OrchDaemon diff --git a/orchagent/pfc_detect_mellanox.lua b/orchagent/pfc_detect_mellanox.lua old mode 100644 new mode 100755 index 5e6d8c00c5..e00243fa65 --- a/orchagent/pfc_detect_mellanox.lua +++ b/orchagent/pfc_detect_mellanox.lua @@ -18,13 +18,20 @@ local timestamp_struct = redis.call('TIME') local timestamp_current = timestamp_struct[1] + timestamp_struct[2] / 1000000 local timestamp_string = tostring(timestamp_current) redis.call('HSET', 'TIMESTAMP', 'pfcwd_poll_timestamp_last', timestamp_string) -local effective_poll_time = poll_time -local effective_poll_time_lasttime = redis.call('HGET', 'TIMESTAMP', 'effective_pfcwd_poll_time_last') +local global_effective_poll_time = poll_time +local global_effective_poll_time_lasttime = redis.call('HGET', 'TIMESTAMP', 'effective_pfcwd_poll_time_last') if timestamp_last ~= false then - effective_poll_time = (timestamp_current - tonumber(timestamp_last)) * 1000000 - redis.call('HSET', 'TIMESTAMP', 'effective_pfcwd_poll_time_last', effective_poll_time) + global_effective_poll_time = (timestamp_current - tonumber(timestamp_last)) * 1000000 + redis.call('HSET', 'TIMESTAMP', 'effective_pfcwd_poll_time_last', global_effective_poll_time) end +local effective_poll_time +local effective_poll_time_lasttime +local port_timestamp_last_cache = {} + +local debug_storm_global = redis.call('HGET', 'DEBUG_STORM', 'enabled') == 'true' +local debug_storm_threshold = tonumber(redis.call('HGET', 'DEBUG_STORM', 'threshold')) + -- Iterate through each queue local n = table.getn(KEYS) for i = n, 1, -1 do @@ -56,12 +63,37 @@ for i = n, 1, -1 do local pfc_rx_pkt_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PKTS' local pfc_duration_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PAUSE_DURATION_US' + -- Get port specific timestamp + local port_timestamp_current = tonumber(redis.call('HGET', counters_table_name .. ':' .. port_id, 'PFC_WD_time_stamp')) + if port_timestamp_current ~= nil then + local port_timestamp_lasttime = port_timestamp_last_cache[port_id] + if port_timestamp_lasttime == nil then + port_timestamp_lasttime = tonumber(redis.call('HGET', counters_table_name .. ':' .. port_id, 'PFC_WD_time_stamp_last')) + port_timestamp_last_cache[port_id] = port_timestamp_lasttime + redis.call('HSET', counters_table_name .. ':' ..
port_id, 'PFC_WD_time_stamp_last', port_timestamp_current) + end + + if port_timestamp_lasttime ~= nil then + effective_poll_time = (port_timestamp_current - port_timestamp_lasttime) / 1000 + else + effective_poll_time = global_effective_poll_time + end + effective_poll_time_lasttime = false + else + effective_poll_time = global_effective_poll_time + effective_poll_time_lasttime = global_effective_poll_time_lasttime + end + -- Get all counters local occupancy_bytes = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES') local packets = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS') local pfc_rx_packets = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key) local pfc_duration = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key) + if debug_storm_global then + redis.call('PUBLISH', 'PFC_WD_DEBUG', 'Port ID ' .. port_id .. ' Queue index ' .. queue_index .. ' occupancy ' .. occupancy_bytes .. ' packets ' .. packets .. ' pfc rx ' .. pfc_rx_packets .. ' pfc duration ' .. pfc_duration .. ' effective poll time ' .. tostring(effective_poll_time) .. '(global ' .. tostring(global_effective_poll_time) .. ')') + end + if occupancy_bytes and packets and pfc_rx_packets and pfc_duration then occupancy_bytes = tonumber(occupancy_bytes) packets = tonumber(packets) @@ -82,6 +114,10 @@ for i = n, 1, -1 do pfc_duration_last = tonumber(pfc_duration_last) local storm_condition = (pfc_duration - pfc_duration_last) > (effective_poll_time * 0.99) + if debug_storm_threshold ~= nil and (pfc_duration - pfc_duration_last) > (effective_poll_time * debug_storm_threshold / 100) then + redis.call('PUBLISH', 'PFC_WD_DEBUG', 'Port ID ' .. port_id .. ' Queue index ' .. queue_index .. ' occupancy ' .. occupancy_bytes .. ' packets ' .. packets .. ' pfc rx ' .. pfc_rx_packets .. ' pfc duration ' .. pfc_duration .. ' effective poll time ' .. tostring(effective_poll_time) .. ', triggered by threshold ' .. debug_storm_threshold .. '%') + end + -- Check actual condition of queue being in PFC storm if (occupancy_bytes > 0 and packets - packets_last == 0 and storm_condition) or -- DEBUG CODE START. Uncomment to enable diff --git a/orchagent/port/porthlpr.cpp b/orchagent/port/porthlpr.cpp index 181fef9f69..3adb7ba9e9 100644 --- a/orchagent/port/porthlpr.cpp +++ b/orchagent/port/porthlpr.cpp @@ -1229,6 +1229,12 @@ bool PortHelper::parsePortConfig(PortConfig &port) const return false; } } + else if (field == PORT_MODE) + { + /* Placeholder to prevent warning. Not needed to be parsed here. 
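For the Lua changes above: the per-port effective poll time is derived from the PFC_WD_time_stamp delta (divided by 1000, which assumes nanosecond timestamps) and falls back to the global value; a storm is declared when the RX pause-duration delta covers at least 99% of that window, and DEBUG_STORM adds a configurable percentage trigger. The arithmetic transcribed into C++ for clarity (the script itself stays Lua):

```cpp
// All durations are in microseconds, matching the _RX_PAUSE_DURATION_US stat.
struct PfcSample
{
    double pfc_duration;      // cumulative RX pause duration at this poll
    double pfc_duration_last; // value at the previous poll
};

// Storm: pause frames covered at least 99% of the effective poll window.
bool stormCondition(const PfcSample &s, double effective_poll_time_us)
{
    return (s.pfc_duration - s.pfc_duration_last) > effective_poll_time_us * 0.99;
}

// DEBUG_STORM variant: fires at a configurable percentage threshold.
bool debugThresholdHit(const PfcSample &s, double effective_poll_time_us,
                       double threshold_percent)
{
    return (s.pfc_duration - s.pfc_duration_last) >
           effective_poll_time_us * threshold_percent / 100.0;
}
```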
+ * Setting exists in sonic-port.yang with possible values: routed|access|trunk + */ + } else { SWSS_LOG_WARN("Unknown field(%s): skipping ...", field.c_str()); diff --git a/orchagent/port/portschema.h b/orchagent/port/portschema.h index 8dd7f79200..3e48b16d8c 100644 --- a/orchagent/port/portschema.h +++ b/orchagent/port/portschema.h @@ -101,3 +101,4 @@ #define PORT_SUPPRESS_THRESHOLD "suppress_threshold" #define PORT_REUSE_THRESHOLD "reuse_threshold" #define PORT_FLAP_PENALTY "flap_penalty" +#define PORT_MODE "mode" diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index 9e265395ce..9475ec16f4 100644 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -1819,8 +1819,16 @@ bool PortsOrch::setPortAdminStatus(Port &port, bool state) void PortsOrch::setHostTxReady(Port port, const std::string &status) { - SWSS_LOG_NOTICE("Setting host_tx_ready status = %s, alias = %s, port_id = 0x%" PRIx64, status.c_str(), port.m_alias.c_str(), port.m_port_id); - m_portStateTable.hset(port.m_alias, "host_tx_ready", status); + vector tuples; + bool exist; + + /* If the port has been removed, there is no need to update StateDB */ + exist = m_portStateTable.get(port.m_alias, tuples); + if (exist) + { + SWSS_LOG_NOTICE("Setting host_tx_ready status = %s, alias = %s, port_id = 0x%" PRIx64, status.c_str(), port.m_alias.c_str(), port.m_port_id); + m_portStateTable.hset(port.m_alias, "host_tx_ready", status); + } } bool PortsOrch::getPortAdminStatus(sai_object_id_t id, bool &up) @@ -2109,6 +2117,10 @@ bool PortsOrch::setPortPfcAsym(Port &port, sai_port_priority_flow_control_mode_t if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to set PFC mode %d to port id 0x%" PRIx64 " (rc:%d)", pfc_asym, port.m_port_id, status); + if (status == SAI_STATUS_NOT_SUPPORTED) + { + return true; + } task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); if (handle_status != task_success) { @@ -6716,7 +6728,8 @@ bool PortsOrch::addVlanMember(Port &vlan, Port &port, string &tagging_mode, stri port.m_alias.c_str(), vlan.m_alias.c_str(), vlan.m_vlan_info.vlan_id, port.m_port_id); /* Use untagged VLAN as pvid of the member port */ - if (sai_tagging_mode == SAI_VLAN_TAGGING_MODE_UNTAGGED) + if (sai_tagging_mode == SAI_VLAN_TAGGING_MODE_UNTAGGED && + port.m_type != Port::TUNNEL) { if(!setPortPvid(port, vlan.m_vlan_info.vlan_id)) { @@ -7051,7 +7064,8 @@ bool PortsOrch::removeVlanMember(Port &vlan, Port &port, string end_point_ip) port.m_alias.c_str(), vlan.m_alias.c_str(), vlan.m_vlan_info.vlan_id, vlan_member_id); /* Restore to default pvid if this port joined this VLAN in untagged mode previously */ - if (sai_tagging_mode == SAI_VLAN_TAGGING_MODE_UNTAGGED) + if (sai_tagging_mode == SAI_VLAN_TAGGING_MODE_UNTAGGED && + port.m_type != Port::TUNNEL) { if (!setPortPvid(port, DEFAULT_PORT_VLAN_ID)) { @@ -7511,7 +7525,9 @@ bool PortsOrch::addTunnel(string tunnel_alias, sai_object_id_t tunnel_id, bool h { tunnel.m_learn_mode = SAI_BRIDGE_PORT_FDB_LEARNING_MODE_DISABLE; } + tunnel.m_oper_status = SAI_PORT_OPER_STATUS_DOWN; m_portList[tunnel_alias] = tunnel; + saiOidToAlias[tunnel_id] = tunnel_alias; SWSS_LOG_INFO("addTunnel:: %" PRIx64, tunnel_id); @@ -7522,6 +7538,7 @@ bool PortsOrch::removeTunnel(Port tunnel) { SWSS_LOG_ENTER(); + saiOidToAlias.erase(tunnel.m_tunnel_id); m_portList.erase(tunnel.m_alias); return true; @@ -8471,9 +8488,10 @@ void PortsOrch::updatePortOperStatus(Port &port, sai_port_oper_status_t status) return; } + updateDbPortOperStatus(port, status); + if (port.m_type == Port::PHY) { -
updateDbPortOperStatus(port, status); updateDbPortFlapCount(port, status); updateGearboxPortOperStatus(port); diff --git a/orchagent/routeorch.cpp b/orchagent/routeorch.cpp index 2903cd0342..314e3027db 100644 --- a/orchagent/routeorch.cpp +++ b/orchagent/routeorch.cpp @@ -1,4 +1,6 @@ #include +#include +#include #include #include #include "routeorch.h" @@ -626,11 +628,15 @@ void RouteOrch::doTask(Consumer& consumer) string remote_macs; string weights; string nhg_index; + string context_index; bool& excp_intfs_flag = ctx.excp_intfs_flag; bool overlay_nh = false; bool blackhole = false; string srv6_segments; string srv6_source; + string srv6_vpn_sids; + bool srv6_seg = false; + bool srv6_vpn = false; bool srv6_nh = false; for (auto i : kfvFieldsValues(t)) @@ -663,16 +669,31 @@ void RouteOrch::doTask(Consumer& consumer) if (fvField(i) == "segment") { srv6_segments = fvValue(i); + srv6_seg = true; srv6_nh = true; } - if (fvField(i) == "seg_src") + if (fvField(i) == "seg_src") { srv6_source = fvValue(i); + srv6_nh = true; + } if (fvField(i) == "protocol") { ctx.protocol = fvValue(i); } + + if (fvField(i) == "vpn_sid") { + srv6_vpn_sids = fvValue(i); + srv6_nh = true; + srv6_vpn = true; + } + + if (fvField(i) == "pic_context_id") + { + context_index = fvValue(i); + srv6_vpn = true; + } } /* @@ -687,6 +708,7 @@ void RouteOrch::doTask(Consumer& consumer) } ctx.nhg_index = nhg_index; + ctx.context_index = context_index; /* * If the nexthop_group is empty, create the next hop group key @@ -701,6 +723,7 @@ void RouteOrch::doTask(Consumer& consumer) NextHopGroupKey& nhg = ctx.nhg; vector srv6_segv; vector srv6_src; + vector srv6_vpn_sidv; bool l3Vni = true; uint32_t vni = 0; @@ -714,6 +737,7 @@ void RouteOrch::doTask(Consumer& consumer) rmacv = tokenize(remote_macs, ','); srv6_segv = tokenize(srv6_segments, ','); srv6_src = tokenize(srv6_source, ','); + srv6_vpn_sidv = tokenize(srv6_vpn_sids, ','); /* * For backward compatibility, adjust ip string from old format to @@ -800,25 +824,29 @@ void RouteOrch::doTask(Consumer& consumer) } else if (srv6_nh == true) { - string ip; - if (ipv.empty()) + if (srv6_vpn && (srv6_vpn_sidv.size() != srv6_src.size())) { - ip = "0.0.0.0"; + SWSS_LOG_ERROR("inconsistent number of endpoints and srv6 vpn sids."); + it = consumer.m_toSync.erase(it); + continue; } - else + + if (srv6_seg && (srv6_segv.size() != srv6_src.size())) { - SWSS_LOG_ERROR("For SRV6 nexthop ipv should be empty"); + SWSS_LOG_ERROR("inconsistent number of srv6_segv and srv6_srcs."); it = consumer.m_toSync.erase(it); continue; } - nhg_str = ip + NH_DELIMITER + srv6_segv[0] + NH_DELIMITER + srv6_src[0]; - for (uint32_t i = 1; i < srv6_segv.size(); i++) + for (uint32_t i = 0; i < srv6_src.size(); i++) { - nhg_str += NHG_DELIMITER + ip; - nhg_str += NH_DELIMITER + srv6_segv[i]; - nhg_str += NH_DELIMITER + srv6_src[i]; + if (i) nhg_str += NHG_DELIMITER; + nhg_str += (ipv.size() > i ? ipv[i] : "0.0.0.0") + NH_DELIMITER; // ip address + nhg_str += (srv6_seg ? srv6_segv[i] : "") + NH_DELIMITER; // srv6 segment + nhg_str += srv6_src[i] + NH_DELIMITER; // srv6 source + nhg_str += (srv6_vpn ? 
srv6_vpn_sidv[i] : "") + NH_DELIMITER; // srv6 vpn sid } + nhg = NextHopGroupKey(nhg_str, overlay_nh, srv6_nh); SWSS_LOG_INFO("SRV6 route with nhg %s", nhg.to_string().c_str()); } @@ -930,7 +958,7 @@ void RouteOrch::doTask(Consumer& consumer) */ else if (m_syncdRoutes.find(vrf_id) == m_syncdRoutes.end() || m_syncdRoutes.at(vrf_id).find(ip_prefix) == m_syncdRoutes.at(vrf_id).end() || - m_syncdRoutes.at(vrf_id).at(ip_prefix) != RouteNhg(nhg, ctx.nhg_index) || + m_syncdRoutes.at(vrf_id).at(ip_prefix) != RouteNhg(nhg, ctx.nhg_index, ctx.context_index) || gRouteBulker.bulk_entry_pending_removal(route_entry) || ctx.using_temp_nhg) { @@ -975,6 +1003,8 @@ void RouteOrch::doTask(Consumer& consumer) // Go through the bulker results auto it_prev = consumer.m_toSync.begin(); m_bulkNhgReducedRefCnt.clear(); + m_bulkSrv6NhgReducedVec.clear(); + while (it_prev != it) { KeyOpFieldsValuesTuple t = it_prev->second; @@ -999,6 +1029,11 @@ void RouteOrch::doTask(Consumer& consumer) const sai_object_id_t& vrf_id = ctx.vrf_id; const IpPrefix& ip_prefix = ctx.ip_prefix; + sai_route_entry_t route_entry; + route_entry.vr_id = vrf_id; + route_entry.switch_id = gSwitchId; + copy(route_entry.destination, ip_prefix); + if (op == SET_COMMAND) { const bool& excp_intfs_flag = ctx.excp_intfs_flag; @@ -1025,7 +1060,8 @@ void RouteOrch::doTask(Consumer& consumer) } else if (m_syncdRoutes.find(vrf_id) == m_syncdRoutes.end() || m_syncdRoutes.at(vrf_id).find(ip_prefix) == m_syncdRoutes.at(vrf_id).end() || - m_syncdRoutes.at(vrf_id).at(ip_prefix) != RouteNhg(nhg, ctx.nhg_index) || + m_syncdRoutes.at(vrf_id).at(ip_prefix) != RouteNhg(nhg, ctx.nhg_index, ctx.context_index) || + gRouteBulker.bulk_entry_pending_removal(route_entry) || ctx.using_temp_nhg) { if (addRoutePost(ctx, nhg)) @@ -1051,25 +1087,18 @@ void RouteOrch::doTask(Consumer& consumer) { removeOverlayNextHops(it_nhg.second, it_nhg.first); } - else if (it_nhg.first.is_srv6_nexthop()) - { - if(it_nhg.first.getSize() > 1) - { - if(m_syncdNextHopGroups[it_nhg.first].ref_count == 0) - { - removeNextHopGroup(it_nhg.first); - } - else - { - SWSS_LOG_ERROR("SRV6 ECMP %s REF count is not zero", it_nhg.first.to_string().c_str()); - } - } - } else if (m_syncdNextHopGroups[it_nhg.first].ref_count == 0) { removeNextHopGroup(it_nhg.first); } } + + /* Reduce reference for srv6 next hop group */ + /* Later delete for increase refcnt early */ + if (!m_bulkSrv6NhgReducedVec.empty()) + { + m_srv6Orch->removeSrv6Nexthops(m_bulkSrv6NhgReducedVec); + } } } @@ -1332,7 +1361,11 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops) nhopgroup_shared_set[next_hop_id].insert(it); } } - + if (!next_hop_ids.size()) + { + SWSS_LOG_INFO("Skipping creation of nexthop group as none of nexthop are active"); + return false; + } sai_attribute_t nhg_attr; vector nhg_attrs; @@ -1554,18 +1587,6 @@ bool RouteOrch::removeNextHopGroup(const NextHopGroupKey &nexthops) } } - if (srv6_nh) - { - if (!m_srv6Orch->removeSrv6Nexthops(nexthops)) - { - SWSS_LOG_ERROR("Failed to remove Srv6 Nexthop %s", nexthops.to_string().c_str()); - } - else - { - SWSS_LOG_INFO("Remove ECMP Srv6 nexthops %s", nexthops.to_string().c_str()); - } - } - m_syncdNextHopGroups.erase(nexthops); return true; @@ -1717,9 +1738,15 @@ void RouteOrch::addTempRoute(RouteBulkContext& ctx, const NextHopGroupKey &nextH SWSS_LOG_INFO("Failed to get next hop %s for %s", (*it).to_string().c_str(), ipPrefix.to_string().c_str()); it = next_hop_set.erase(it); + continue; + } + if(m_neighOrch->isNextHopFlagSet(*it, NHFLAGS_IFDOWN)) + { + 
SWSS_LOG_INFO("Interface down for NH %s, skip this NH", (*it).to_string().c_str()); + it = next_hop_set.erase(it); + continue; } - else - it++; + it++; } /* Return if next_hop_set is empty */ @@ -1834,6 +1861,15 @@ bool RouteOrch::addRoute(RouteBulkContext& ctx, const NextHopGroupKey &nextHops) if (m_neighOrch->hasNextHop(nexthop)) { next_hop_id = m_neighOrch->getNextHopId(nexthop); + if (srv6_nh) + { + SWSS_LOG_INFO("Single NH: create srv6 vpn %s", nextHops.to_string().c_str()); + if (!m_srv6Orch->srv6Nexthops(nextHops, next_hop_id)) + { + SWSS_LOG_ERROR("Failed to create SRV6 vpn %s", nextHops.to_string().c_str()); + return false; + } + } } /* For non-existent MPLS NH, check if IP neighbor NH exists */ else if (nexthop.isMplsNextHop() && @@ -1885,22 +1921,38 @@ bool RouteOrch::addRoute(RouteBulkContext& ctx, const NextHopGroupKey &nextHops) /* The route is pointing to a next hop group */ else { + /* Need to call srv6nexthops() always for srv6 route, */ + /* regardless of whether there is already an existing next hop group */ + /* because vpn refcount need to be add if need */ + if (srv6_nh) + { + sai_object_id_t temp_nh_id; + SWSS_LOG_INFO("ECMP SRV6 NH: handle srv6 nexthops %s", nextHops.to_string().c_str()); + if(!m_srv6Orch->srv6Nexthops(nextHops, temp_nh_id)) + { + SWSS_LOG_ERROR("Failed to handle SRV6 nexthops for %s", nextHops.to_string().c_str()); + return false; + } + } + /* Check if there is already an existing next hop group */ if (!hasNextHopGroup(nextHops)) { - if(srv6_nh) + /* Try to create a new next hop group */ + if (!addNextHopGroup(nextHops)) { - sai_object_id_t temp_nh_id; - SWSS_LOG_INFO("ECMP SRV6 NH: create srv6 nexthops %s", nextHops.to_string().c_str()); - if(!m_srv6Orch->srv6Nexthops(nextHops, temp_nh_id)) + /* If the nexthop is a srv6 nexthop, not create tempRoute + * retry to add route */ + if (nextHops.is_srv6_nexthop()) { - SWSS_LOG_ERROR("Failed to create SRV6 nexthops for %s", nextHops.to_string().c_str()); return false; } - } - /* Try to create a new next hop group */ - if (!addNextHopGroup(nextHops)) - { + + if (it_route != m_syncdRoutes.at(vrf_id).end() && it_route->second.nhg_key.is_srv6_nexthop()) + { + return false; + } + for(auto it = nextHops.getNextHops().begin(); it != nextHops.getNextHops().end(); ++it) { const NextHopKey& nextHop = *it; @@ -1963,6 +2015,8 @@ bool RouteOrch::addRoute(RouteBulkContext& ctx, const NextHopGroupKey &nextHops) copy(route_entry.destination, ipPrefix); sai_attribute_t route_attr; + vector attrs; + vector<_sai_attribute_t> route_attrs; auto& object_statuses = ctx.object_statuses; /* If the prefix is not in m_syncdRoutes, then we need to create the route @@ -1981,16 +2035,30 @@ bool RouteOrch::addRoute(RouteBulkContext& ctx, const NextHopGroupKey &nextHops) { route_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; route_attr.value.s32 = SAI_PACKET_ACTION_DROP; + route_attrs.push_back(route_attr); } else { route_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; route_attr.value.oid = next_hop_id; + route_attrs.push_back(route_attr); + } + + if (!ctx.context_index.empty() || nextHops.is_srv6_vpn()) + { + if (!ctx.context_index.empty() && !m_srv6Orch->contextIdExists(ctx.context_index)) + { + SWSS_LOG_INFO("Context id %s does not exist", ctx.context_index.c_str()); + return false; + } + route_attr.id = SAI_ROUTE_ENTRY_ATTR_PREFIX_AGG_ID; + route_attr.value.u32 = ctx.nhg_index.empty() ? 
m_srv6Orch->getAggId(nextHops) : m_srv6Orch->getAggId(ctx.context_index); + route_attrs.push_back(route_attr); } /* Default SAI_ROUTE_ATTR_PACKET_ACTION is SAI_PACKET_ACTION_FORWARD */ object_statuses.emplace_back(); - sai_status_t status = gRouteBulker.create_entry(&object_statuses.back(), &route_entry, 1, &route_attr); + sai_status_t status = gRouteBulker.create_entry(&object_statuses.back(), &route_entry, (uint32_t)route_attrs.size(), route_attrs.data()); if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) { SWSS_LOG_ERROR("Failed to create route %s with next hop(s) %s: already exists in bulker", @@ -2037,6 +2105,21 @@ bool RouteOrch::addRoute(RouteBulkContext& ctx, const NextHopGroupKey &nextHops) gRouteBulker.set_entry_attribute(&object_statuses.back(), &route_entry, &route_attr); } + // Set or update the prefix agg id if needed + if (nextHops.is_srv6_vpn() || + (it_route->second.context_index != ctx.context_index && !ctx.context_index.empty())) + { + if (!ctx.context_index.empty() && !m_srv6Orch->contextIdExists(ctx.context_index)) + { + SWSS_LOG_INFO("Context id %s does not exist", ctx.context_index.c_str()); + return false; + } + route_attr.id = SAI_ROUTE_ENTRY_ATTR_PREFIX_AGG_ID; + route_attr.value.u32 = ctx.nhg_index.empty() ? m_srv6Orch->getAggId(nextHops) : m_srv6Orch->getAggId(ctx.context_index); + object_statuses.emplace_back(); + gRouteBulker.set_entry_attribute(&object_statuses.back(), &route_entry, &route_attr); + } + route_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; route_attr.value.oid = next_hop_id; @@ -2126,7 +2209,9 @@ bool RouteOrch::addRoutePost(const RouteBulkContext& ctx, const NextHopGroupKey { // Previous added an temporary route auto& tmp_next_hop = ctx.tmp_next_hop; - addRoutePost(ctx, tmp_next_hop); + if (tmp_next_hop.getSize() > 0) { + addRoutePost(ctx, tmp_next_hop); + } return false; } } @@ -2215,7 +2300,7 @@ bool RouteOrch::addRoutePost(const RouteBulkContext& ctx, const NextHopGroupKey } else { - incNhgRefCount(ctx.nhg_index); + incNhgRefCount(ctx.nhg_index, ctx.context_index); } SWSS_LOG_INFO("Post create route %s with next hop(s) %s", @@ -2263,6 +2348,10 @@ bool RouteOrch::addRoutePost(const RouteBulkContext& ctx, const NextHopGroupKey { decreaseNextHopRefCount(it_route->second.nhg_key); auto ol_nextHops = it_route->second.nhg_key; + if (ol_nextHops.is_srv6_nexthop()) + { + m_bulkSrv6NhgReducedVec.emplace_back(ol_nextHops); + } if (ol_nextHops.getSize() > 1) { if (m_syncdNextHopGroups[ol_nextHops].ref_count == 0) @@ -2293,11 +2382,7 @@ bool RouteOrch::addRoutePost(const RouteBulkContext& ctx, const NextHopGroupKey m_bulkNhgReducedRefCnt.emplace(ol_nextHops, vrf_id); } } - else if (ol_nextHops.is_srv6_nexthop()) - { - m_srv6Orch->removeSrv6Nexthops(ol_nextHops); - } - else if (ol_nextHops.getSize() == 1) + else if (ol_nextHops.getSize() == 1 && !ol_nextHops.is_srv6_nexthop()) { RouteKey r_key = { vrf_id, ipPrefix }; auto nexthop = NextHopKey(ol_nextHops.to_string()); @@ -2307,7 +2392,7 @@ bool RouteOrch::addRoutePost(const RouteBulkContext& ctx, const NextHopGroupKey /* The next hop group is owned by (Cbf)NhgOrch.
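The bulker call above now takes route_attrs.size()/route_attrs.data() instead of a single attribute, so PREFIX_AGG_ID can ride along with the packet action or next hop id. The accumulate-then-create shape, sketched with a mock attribute type (FakeAttr and the numeric ids are placeholders, not SAI values):

```cpp
#include <cstdint>
#include <vector>

// FakeAttr stands in for sai_attribute_t; the ids stand in for the
// SAI_ROUTE_ENTRY_ATTR_* constants used in the hunk above.
struct FakeAttr
{
    int id;
    union { int32_t s32; uint64_t oid; uint32_t u32; } value;
};

enum { ATTR_PACKET_ACTION = 1, ATTR_NEXT_HOP_ID = 2, ATTR_PREFIX_AGG_ID = 3 };

std::vector<FakeAttr> buildRouteAttrs(bool blackhole, uint64_t next_hop_oid,
                                      bool has_agg_id, uint32_t agg_id)
{
    std::vector<FakeAttr> attrs;
    FakeAttr a{};
    if (blackhole)
    {
        a.id = ATTR_PACKET_ACTION;
        a.value.s32 = 0; // stands in for SAI_PACKET_ACTION_DROP
    }
    else
    {
        a.id = ATTR_NEXT_HOP_ID;
        a.value.oid = next_hop_oid;
    }
    attrs.push_back(a);

    if (has_agg_id) // SRv6 VPN route, or an explicit pic_context_id
    {
        FakeAttr agg{};
        agg.id = ATTR_PREFIX_AGG_ID;
        agg.value.u32 = agg_id;
        attrs.push_back(agg);
    }
    return attrs; // caller passes attrs.size() and attrs.data() to the bulker
}
```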
*/ else if (!it_route->second.nhg_index.empty()) { - decNhgRefCount(it_route->second.nhg_index); + decNhgRefCount(it_route->second.nhg_index, it_route->second.context_index); } if (blackhole) @@ -2333,7 +2418,7 @@ bool RouteOrch::addRoutePost(const RouteBulkContext& ctx, const NextHopGroupKey } else { - incNhgRefCount(ctx.nhg_index); + incNhgRefCount(ctx.nhg_index, ctx.context_index); } SWSS_LOG_INFO("Post set route %s with next hop(s) %s", @@ -2372,7 +2457,7 @@ bool RouteOrch::addRoutePost(const RouteBulkContext& ctx, const NextHopGroupKey gFlowCounterRouteOrch->handleRouteAdd(vrf_id, ipPrefix); } - m_syncdRoutes[vrf_id][ipPrefix] = RouteNhg(nextHops, ctx.nhg_index); + m_syncdRoutes[vrf_id][ipPrefix] = RouteNhg(nextHops, ctx.nhg_index, ctx.context_index); /* add subnet decap term for VIP route */ const SubnetDecapConfig &config = gTunneldecapOrch->getSubnetDecapConfig(); @@ -2423,8 +2508,22 @@ bool RouteOrch::removeRoute(RouteBulkContext& ctx) size_t creating = gRouteBulker.creating_entries_count(route_entry); if (it_route == it_route_table->second.end() && creating == 0) { + /* + * Clean up the VRF routing table if + * 1. there is no routing entry in the VRF routing table and + * 2. there is no pending bulk creation routing entry in gRouteBulker + * Ideally, the 2nd condition would check only the pending bulk creation entries of this VRF. + * However, we cannot do that without going over all entries in gRouteBulker, + * so we use the stricter conditions above. + */ + if (it_route_table->second.size() == 0 && gRouteBulker.creating_entries_count() == 0) + { + m_syncdRoutes.erase(vrf_id); + m_vrfOrch->decreaseVrfRefCount(vrf_id); + } SWSS_LOG_INFO("Failed to find route entry, vrf_id 0x%" PRIx64 ", prefix %s\n", vrf_id, - ipPrefix.to_string().c_str()); + ipPrefix.to_string().c_str()); + return true; } @@ -2538,7 +2637,7 @@ bool RouteOrch::removeRoutePost(const RouteBulkContext& ctx) /* Check if the next hop group is not owned by NhgOrch.
*/ else if (!it_route->second.nhg_index.empty()) { - decNhgRefCount(it_route->second.nhg_index); + decNhgRefCount(it_route->second.nhg_index, it_route->second.context_index); } /* The NHG is owned by RouteOrch */ else @@ -2549,6 +2648,12 @@ bool RouteOrch::removeRoutePost(const RouteBulkContext& ctx) decreaseNextHopRefCount(it_route->second.nhg_key); auto ol_nextHops = it_route->second.nhg_key; + + if (ol_nextHops.is_srv6_nexthop()) + { + m_bulkSrv6NhgReducedVec.emplace_back(ol_nextHops); + } + MuxOrch* mux_orch = gDirectory.get(); if (it_route->second.nhg_key.getSize() > 1) { @@ -2594,11 +2699,6 @@ bool RouteOrch::removeRoutePost(const RouteBulkContext& ctx) { m_neighOrch->removeMplsNextHop(nexthop); } - else if (nexthop.isSrv6NextHop() && - (m_neighOrch->getNextHopRefCount(nexthop) == 0)) - { - m_srv6Orch->removeSrv6Nexthops(it_route->second.nhg_key); - } RouteKey r_key = { vrf_id, ipPrefix }; removeNextHopRoute(nexthop, r_key); @@ -2788,7 +2888,7 @@ const NhgBase &RouteOrch::getNhg(const std::string &nhg_index) } } -void RouteOrch::incNhgRefCount(const std::string &nhg_index) +void RouteOrch::incNhgRefCount(const std::string &nhg_index, const std::string &context_index) { SWSS_LOG_ENTER(); @@ -2800,9 +2900,14 @@ void RouteOrch::incNhgRefCount(const std::string &nhg_index) { gCbfNhgOrch->incNhgRefCount(nhg_index); } + + if (!context_index.empty()) + { + m_srv6Orch->increasePicContextIdRefCount(context_index); + } } -void RouteOrch::decNhgRefCount(const std::string &nhg_index) +void RouteOrch::decNhgRefCount(const std::string &nhg_index, const std::string &context_index) { SWSS_LOG_ENTER(); @@ -2814,6 +2919,11 @@ void RouteOrch::decNhgRefCount(const std::string &nhg_index) { gCbfNhgOrch->decNhgRefCount(nhg_index); } + + if (!context_index.empty()) + { + m_srv6Orch->decreasePicContextIdRefCount(context_index); + } } void RouteOrch::publishRouteState(const RouteBulkContext& ctx, const ReturnCode& status) diff --git a/orchagent/routeorch.h b/orchagent/routeorch.h index 595af46081..6f9e7f243f 100644 --- a/orchagent/routeorch.h +++ b/orchagent/routeorch.h @@ -65,12 +65,14 @@ struct RouteNhg */ std::string nhg_index; + std::string context_index; + RouteNhg() = default; - RouteNhg(const NextHopGroupKey& key, const std::string& index) : - nhg_key(key), nhg_index(index) {} + RouteNhg(const NextHopGroupKey& key, const std::string& index, const std::string &context_index = "") : + nhg_key(key), nhg_index(index), context_index(context_index) {} bool operator==(const RouteNhg& rnhg) - { return ((nhg_key == rnhg.nhg_key) && (nhg_index == rnhg.nhg_index)); } + { return ((nhg_key == rnhg.nhg_key) && (nhg_index == rnhg.nhg_index) && (context_index == rnhg.context_index)); } bool operator!=(const RouteNhg& rnhg) { return !(*this == rnhg); } }; @@ -117,11 +119,17 @@ struct RouteBulkContext NextHopGroupKey tmp_next_hop; // Temporary next hop NextHopGroupKey nhg; std::string nhg_index; + std::string context_index; sai_object_id_t vrf_id; IpPrefix ip_prefix; bool excp_intfs_flag; // using_temp_nhg will track if the NhgOrch's owned NHG is temporary or not bool using_temp_nhg; + std::vector ipv; + std::vector alsv; + std::vector vni_labelv; + std::vector rmacv; + bool vrf_group_flag; std::string key; // Key in database table std::string protocol; // Protocol string @@ -141,8 +149,9 @@ struct RouteBulkContext object_statuses.clear(); tmp_next_hop.clear(); nhg.clear(); - excp_intfs_flag = false; + ipv.clear(); vrf_id = SAI_NULL_OBJECT_ID; + excp_intfs_flag = false; using_temp_nhg = false; key.clear(); 
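A note on the RouteNhg change above: the "route unchanged" test in doTask() compares against RouteNhg(nhg, ctx.nhg_index, ctx.context_index), so operator== has to cover the new member or an update that only changes the PIC context would be skipped. Reduced to a sketch:

```cpp
#include <string>

// MiniRouteNhg models just the comparison; the real struct carries more state.
struct MiniRouteNhg
{
    std::string nhg_key;
    std::string nhg_index;
    std::string context_index; // new field in this change

    bool operator==(const MiniRouteNhg &o) const
    {
        return nhg_key == o.nhg_key &&
               nhg_index == o.nhg_index &&
               context_index == o.context_index;
    }
    bool operator!=(const MiniRouteNhg &o) const { return !(*this == o); }
};

// MiniRouteNhg{"nh", "idx", "ctx1"} != MiniRouteNhg{"nh", "idx", "ctx2"}
// evaluates true, which is what forces the route to be reprogrammed.
```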
protocol.clear(); @@ -255,6 +264,7 @@ class RouteOrch : public Orch, public Subject std::set m_SubnetDecapTermsCreated; ProducerStateTable m_appTunnelDecapTermProducer; + std::vector m_bulkSrv6NhgReducedVec; NextHopObserverTable m_nextHopObservers; @@ -263,7 +273,7 @@ class RouteOrch : public Orch, public Subject ObjectBulker gNextHopGroupMemberBulker; void addTempRoute(RouteBulkContext& ctx, const NextHopGroupKey&); - bool addRoute(RouteBulkContext& ctx, const NextHopGroupKey&); + bool addRoute(RouteBulkContext& ctx, const NextHopGroupKey &nextHops); bool removeRoute(RouteBulkContext& ctx); bool addRoutePost(const RouteBulkContext& ctx, const NextHopGroupKey &nextHops); bool removeRoutePost(const RouteBulkContext& ctx); @@ -280,14 +290,14 @@ class RouteOrch : public Orch, public Subject void doLabelTask(Consumer& consumer); const NhgBase &getNhg(const std::string& nhg_index); - void incNhgRefCount(const std::string& nhg_index); - void decNhgRefCount(const std::string& nhg_index); void publishRouteState(const RouteBulkContext& ctx, const ReturnCode& status = ReturnCode(SAI_STATUS_SUCCESS)); bool isVipRoute(const IpPrefix &ipPrefix, const NextHopGroupKey &nextHops); void createVipRouteSubnetDecapTerm(const IpPrefix &ipPrefix); void removeVipRouteSubnetDecapTerm(const IpPrefix &ipPrefix); + void incNhgRefCount(const std::string& nhg_index, const std::string &context_index = ""); + void decNhgRefCount(const std::string& nhg_index, const std::string &context_index = ""); }; #endif /* SWSS_ROUTEORCH_H */ diff --git a/orchagent/saihelper.cpp b/orchagent/saihelper.cpp index e7cf7fb018..773db489d6 100644 --- a/orchagent/saihelper.cpp +++ b/orchagent/saihelper.cpp @@ -854,6 +854,8 @@ static inline void initSaiRedisCounterEmptyParameter(sai_redis_flex_counter_grou initSaiRedisCounterEmptyParameter(flex_counter_group_param.stats_mode); initSaiRedisCounterEmptyParameter(flex_counter_group_param.plugin_name); initSaiRedisCounterEmptyParameter(flex_counter_group_param.plugins); + initSaiRedisCounterEmptyParameter(flex_counter_group_param.bulk_chunk_size); + initSaiRedisCounterEmptyParameter(flex_counter_group_param.bulk_chunk_size_per_prefix); } static inline void initSaiRedisCounterParameterFromString(sai_s8_list_t &sai_s8_list, const std::string &str) @@ -938,6 +940,8 @@ void setFlexCounterGroupParameter(const string &group, attr.id = SAI_REDIS_SWITCH_ATTR_FLEX_COUNTER_GROUP; attr.value.ptr = &flex_counter_group_param; + initSaiRedisCounterEmptyParameter(flex_counter_group_param.bulk_chunk_size); + initSaiRedisCounterEmptyParameter(flex_counter_group_param.bulk_chunk_size_per_prefix); initSaiRedisCounterParameterFromString(flex_counter_group_param.counter_group_name, group); initSaiRedisCounterParameterFromString(flex_counter_group_param.poll_interval, poll_interval); initSaiRedisCounterParameterFromString(flex_counter_group_param.operation, operation); @@ -1017,6 +1021,25 @@ void setFlexCounterGroupStatsMode(const std::string &group, notifySyncdCounterOperation(is_gearbox, attr); } +void setFlexCounterGroupBulkChunkSize(const std::string &group, + const std::string &bulk_chunk_size, + const std::string &bulk_chunk_size_per_prefix, + bool is_gearbox) +{ + sai_attribute_t attr; + sai_redis_flex_counter_group_parameter_t flex_counter_group_param; + + attr.id = SAI_REDIS_SWITCH_ATTR_FLEX_COUNTER_GROUP; + attr.value.ptr = &flex_counter_group_param; + + initSaiRedisCounterEmptyParameter(flex_counter_group_param); + initSaiRedisCounterParameterFromString(flex_counter_group_param.counter_group_name, group); 
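A hypothetical call site for the new helper, once the function below is complete. The group name is a common SONiC counter group, but both size strings and the per-prefix encoding are placeholder assumptions, not values defined by this patch:

```cpp
#include "saihelper.h" // declares setFlexCounterGroupBulkChunkSize (added above)

void exampleConfigureBulkChunks()
{
    setFlexCounterGroupBulkChunkSize(
        "PORT_STAT_COUNTER",          // counter group
        "1000",                       // default bulk chunk size (placeholder)
        "SAI_PORT_STAT_IF_IN_FEC:10", // per-prefix override (assumed format)
        false);                       // not a gearbox group
}
```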
+ initSaiRedisCounterParameterFromString(flex_counter_group_param.bulk_chunk_size, bulk_chunk_size); + initSaiRedisCounterParameterFromString(flex_counter_group_param.bulk_chunk_size_per_prefix, bulk_chunk_size_per_prefix); + + notifySyncdCounterOperation(is_gearbox, attr); +} + void delFlexCounterGroup(const std::string &group, bool is_gearbox) { diff --git a/orchagent/saihelper.h b/orchagent/saihelper.h index 7334adff35..0406427059 100644 --- a/orchagent/saihelper.h +++ b/orchagent/saihelper.h @@ -39,6 +39,11 @@ void setFlexCounterGroupStatsMode(const std::string &group, const std::string &stats_mode, bool is_gearbox=false); +void setFlexCounterGroupBulkChunkSize(const std::string &group, + const std::string &bulk_size, + const std::string &bulk_chunk_size_per_prefix, + bool is_gearbox=false); + void delFlexCounterGroup(const std::string &group, bool is_gearbox=false); diff --git a/orchagent/srv6orch.cpp b/orchagent/srv6orch.cpp index d0b4a39f88..8f4458c845 100644 --- a/orchagent/srv6orch.cpp +++ b/orchagent/srv6orch.cpp @@ -1,5 +1,6 @@ #include #include +#include #include "routeorch.h" #include "logger.h" @@ -36,11 +37,11 @@ const map end_behavior_map = {"end.b6.encaps.red", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_ENCAPS_RED}, {"end.b6.insert", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_INSERT}, {"end.b6.insert.red", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_INSERT_RED}, - {"udx6", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX6}, - {"udx4", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX4}, - {"udt6", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT6}, - {"udt4", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT4}, - {"udt46", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT46}, + {"udx6", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UDX6}, + {"udx4", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UDX4}, + {"udt6", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UDT6}, + {"udt4", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UDT4}, + {"udt46", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UDT46}, {"un", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UN}, {"ua", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UA} }; @@ -79,6 +80,16 @@ size_t Srv6Orch::srv6TunnelNexthopSize(const string srv6_source) return srv6_tunnel_table_[srv6_source].nexthops.size(); } +bool Srv6Orch::sidListExists(const string &segment_name) +{ + SWSS_LOG_ENTER(); + if (sid_table_.find(segment_name) != sid_table_.end()) + { + return true; + } + return false; +} + bool Srv6Orch::createSrv6Tunnel(const string srv6_source) { SWSS_LOG_ENTER(); @@ -132,60 +143,50 @@ bool Srv6Orch::srv6NexthopExists(const NextHopKey &nhKey) } } -bool Srv6Orch::removeSrv6Nexthops(const NextHopGroupKey &nhg) +bool Srv6Orch::removeSrv6NexthopWithoutVpn(const NextHopKey &nhKey) { SWSS_LOG_ENTER(); + return deleteSrv6Nexthop(nhKey); +} - for (auto &sr_nh : nhg.getNextHops()) - { - string srv6_source, segname; - sai_status_t status = SAI_STATUS_SUCCESS; - srv6_source = sr_nh.srv6_source; - segname = sr_nh.srv6_segment; +bool Srv6Orch::removeSrv6Nexthops(const std::vector &nhgv) +{ + SWSS_LOG_ENTER(); - SWSS_LOG_NOTICE("SRV6 Nexthop %s refcount %d", sr_nh.to_string(false,true).c_str(), m_neighOrch->getNextHopRefCount(sr_nh)); - if (m_neighOrch->getNextHopRefCount(sr_nh) == 0) + // 1. 
remove vpn_sid first + for (auto& it_nhg : nhgv) + { + if (it_nhg.is_srv6_vpn()) { - status = sai_next_hop_api->remove_next_hop(srv6_nexthop_table_[sr_nh]); - if (status != SAI_STATUS_SUCCESS) + for (auto &sr_nh : it_nhg.getNextHops()) { - SWSS_LOG_ERROR("Failed to remove SRV6 nexthop %s", sr_nh.to_string(false,true).c_str()); - return false; - } - - /* Update nexthop in SID table after deleting the nexthop */ - SWSS_LOG_INFO("Seg %s nexthop refcount %zu", - segname.c_str(), - sid_table_[segname].nexthops.size()); - if (sid_table_[segname].nexthops.find(sr_nh) != sid_table_[segname].nexthops.end()) - { - sid_table_[segname].nexthops.erase(sr_nh); + if (sr_nh.isSrv6Vpn()) + { + if (!deleteSrv6Vpn(sr_nh.ip_address.to_string(), sr_nh.srv6_vpn_sid, getAggId(it_nhg))) + { + SWSS_LOG_ERROR("Failed to delete SRV6 vpn %s", sr_nh.to_string(false, true).c_str()); + return false; + } + } } - m_neighOrch->updateSrv6Nexthop(sr_nh, 0); - srv6_nexthop_table_.erase(sr_nh); - - /* Delete NH from the tunnel map */ - SWSS_LOG_INFO("Delete NH %s from tunnel map", - sr_nh.to_string(false, true).c_str()); - srv6TunnelUpdateNexthops(srv6_source, sr_nh, false); + decreasePrefixAggIdRefCount(it_nhg); + deleteAggId(it_nhg); } + } - size_t tunnel_nhs = srv6TunnelNexthopSize(srv6_source); - if (tunnel_nhs == 0) + // 2. delete nexthop & prefix agg id + for (auto& nhg : nhgv) + { + for (auto &sr_nh : nhg.getNextHops()) { - status = sai_tunnel_api->remove_tunnel(srv6_tunnel_table_[srv6_source].tunnel_object_id); - if (status != SAI_STATUS_SUCCESS) + if (!deleteSrv6Nexthop(sr_nh)) { - SWSS_LOG_ERROR("Failed to remove SRV6 tunnel object for source %s", srv6_source.c_str()); + SWSS_LOG_ERROR("Failed to delete SRV6 nexthop %s", sr_nh.to_string(false,true).c_str()); return false; } - srv6_tunnel_table_.erase(srv6_source); - } - else - { - SWSS_LOG_INFO("Nexthops referencing this tunnel object %s: %zu", srv6_source.c_str(),tunnel_nhs); } } + return true; } @@ -194,26 +195,42 @@ bool Srv6Orch::createSrv6Nexthop(const NextHopKey &nh) SWSS_LOG_ENTER(); string srv6_segment = nh.srv6_segment; string srv6_source = nh.srv6_source; + string srv6_tunnel_endpoint; if (srv6NexthopExists(nh)) { SWSS_LOG_INFO("SRV6 nexthop already created for %s", nh.to_string(false,true).c_str()); return true; } - sai_object_id_t srv6_object_id = sid_table_[srv6_segment].sid_object_id; - sai_object_id_t srv6_tunnel_id = srv6_tunnel_table_[srv6_source].tunnel_object_id; - if (srv6_object_id == SAI_NULL_OBJECT_ID) + sai_object_id_t srv6_segment_id; + sai_object_id_t srv6_tunnel_id; + + if (srv6_segment == "") { - SWSS_LOG_ERROR("segment object doesn't exist for segment %s", srv6_segment.c_str()); - return false; + srv6_segment_id = SAI_NULL_OBJECT_ID; + } + else + { + if (!sidListExists(srv6_segment)) + { + SWSS_LOG_ERROR("Segment %s does not exist", srv6_segment.c_str()); + return false; + } + srv6_segment_id = sid_table_[srv6_segment].sid_object_id; } - if (srv6_tunnel_id == SAI_NULL_OBJECT_ID) + if (nh.ip_address.isZero()) { - SWSS_LOG_ERROR("tunnel object doesn't exist for source %s", srv6_source.c_str()); - return false; + srv6_tunnel_endpoint = srv6_source; + srv6_tunnel_id = srv6_tunnel_table_[srv6_tunnel_endpoint].tunnel_object_id; + } + else + { + srv6_tunnel_endpoint = nh.ip_address.to_string(); + srv6_tunnel_id = srv6_p2p_tunnel_table_[srv6_tunnel_endpoint].tunnel_id; } + SWSS_LOG_INFO("Create srv6 nh for tunnel src %s with seg %s", srv6_source.c_str(), srv6_segment.c_str()); vector nh_attrs; sai_object_id_t nexthop_id; @@ -225,7 +242,7 @@ bool 
Srv6Orch::createSrv6Nexthop(const NextHopKey &nh) nh_attrs.push_back(attr); attr.id = SAI_NEXT_HOP_ATTR_SRV6_SIDLIST_ID; - attr.value.oid = srv6_object_id; + attr.value.oid = srv6_segment_id; nh_attrs.push_back(attr); attr.id = SAI_NEXT_HOP_ATTR_TUNNEL_ID; @@ -242,33 +259,169 @@ bool Srv6Orch::createSrv6Nexthop(const NextHopKey &nh) } m_neighOrch->updateSrv6Nexthop(nh, nexthop_id); srv6_nexthop_table_[nh] = nexthop_id; - sid_table_[srv6_segment].nexthops.insert(nh); - srv6TunnelUpdateNexthops(srv6_source, nh, true); + if (srv6_segment != "") + { + sid_table_[srv6_segment].nexthops.insert(nh); + } + + if (nh.ip_address.isZero()) + { + srv6TunnelUpdateNexthops(srv6_source, nh, true); + } + else + { + srv6P2ptunnelUpdateNexthops(nh, true); + } return true; } -bool Srv6Orch::srv6Nexthops(const NextHopGroupKey &nhgKey, sai_object_id_t &nexthop_id) +bool Srv6Orch::deleteSrv6Nexthop(const NextHopKey &nh) { SWSS_LOG_ENTER(); - set nexthops = nhgKey.getNextHops(); - string srv6_source; - string srv6_segment; - for (auto nh : nexthops) + sai_status_t status = SAI_STATUS_SUCCESS; + + if (!srv6NexthopExists(nh)) { - srv6_source = nh.srv6_source; + return true; + } + + SWSS_LOG_DEBUG("SRV6 Nexthop %s refcount %d", nh.to_string(false,true).c_str(), m_neighOrch->getNextHopRefCount(nh)); + if (m_neighOrch->getNextHopRefCount(nh) == 0) + { + sai_object_id_t nexthop_id; + nexthop_id = srv6_nexthop_table_[nh]; + status = sai_next_hop_api->remove_next_hop(nexthop_id); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove SRV6 nexthop %s", nh.to_string(false,true).c_str()); + return false; + } + + /* Decrease srv6 segment reference */ + if (nh.srv6_segment != "") + { + /* Update nexthop in SID table after deleting the nexthop */ + SWSS_LOG_INFO("Seg %s nexthop refcount %zu", + nh.srv6_segment.c_str(), + sid_table_[nh.srv6_segment].nexthops.size()); + if (sid_table_[nh.srv6_segment].nexthops.find(nh) != sid_table_[nh.srv6_segment].nexthops.end()) + { + sid_table_[nh.srv6_segment].nexthops.erase(nh); + } + } + m_neighOrch->updateSrv6Nexthop(nh, 0); + + srv6_nexthop_table_.erase(nh); + + /* Delete NH from the tunnel map */ + SWSS_LOG_INFO("Delete NH %s from tunnel map", + nh.to_string(false, true).c_str()); + + if (nh.ip_address.isZero()) + { + string srv6_source = nh.srv6_source; + srv6TunnelUpdateNexthops(srv6_source, nh, false); + size_t tunnel_nhs = srv6TunnelNexthopSize(srv6_source); + if (tunnel_nhs == 0) + { + status = sai_tunnel_api->remove_tunnel(srv6_tunnel_table_[srv6_source].tunnel_object_id); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove SRV6 tunnel object for source %s", srv6_source.c_str()); + return false; + } + srv6_tunnel_table_.erase(srv6_source); + } + else + { + SWSS_LOG_INFO("Nexthops referencing this tunnel object %s: %zu", srv6_source.c_str(),tunnel_nhs); + } + } + else + { + std::string endpoint = nh.ip_address.to_string(); + srv6P2ptunnelUpdateNexthops(nh, false); + if (!deleteSrv6P2pTunnel(endpoint)) + { + SWSS_LOG_ERROR("Failed to remove SRV6 p2p tunnel object for dst %s,", endpoint.c_str()); + return false; + } + } + } + + return true; +} + +bool Srv6Orch::createSrv6NexthopWithoutVpn(const NextHopKey &nh, sai_object_id_t &nexthop_id) +{ + SWSS_LOG_ENTER(); + + // 1. 
create tunnel + if (nh.ip_address.isZero()) + { + // create srv6 tunnel + auto srv6_source = nh.srv6_source; if (!createSrv6Tunnel(srv6_source)) { SWSS_LOG_ERROR("Failed to create tunnel for source %s", srv6_source.c_str()); return false; } - if (!createSrv6Nexthop(nh)) + } + else + { + // create p2p tunnel + if (!createSrv6P2pTunnel(nh.srv6_source, nh.ip_address.to_string())) { - SWSS_LOG_ERROR("Failed to create SRV6 nexthop %s", nh.to_string(false,true).c_str()); + SWSS_LOG_ERROR("Failed to create SRV6 p2p tunnel %s", nh.to_string(false, true).c_str()); return false; } } + // 2. create nexthop + if (!createSrv6Nexthop(nh)) + { + SWSS_LOG_ERROR("Failed to create SRV6 nexthop %s", nh.to_string(false,true).c_str()); + return false; + } + + nexthop_id = srv6_nexthop_table_[nh]; + return true; +} + +bool Srv6Orch::srv6Nexthops(const NextHopGroupKey &nhgKey, sai_object_id_t &nexthop_id) +{ + SWSS_LOG_ENTER(); + set nexthops = nhgKey.getNextHops(); + + for (auto nh : nexthops) + { + // create SRv6 nexthop + if (!createSrv6NexthopWithoutVpn(nh, nexthop_id)) + { + SWSS_LOG_ERROR("Failed to create SRv6 nexthop %s", nh.to_string(false, true).c_str()); + return false; + } + } + + // create SRv6 VPN if need + if (nhgKey.is_srv6_vpn()) + { + for (auto it = nexthops.begin(); it != nexthops.end(); ++it) + { + if (it->isSrv6Vpn()) + { + if (!createSrv6Vpn(it->ip_address.to_string(), it->srv6_vpn_sid, getAggId(nhgKey))) + { + SWSS_LOG_ERROR("Failed to create SRV6 vpn %s", it->to_string(false, true).c_str()); + return false; + } + } + } + + increasePrefixAggIdRefCount(nhgKey); + } + if (nhgKey.getSize() == 1) { NextHopKey nhkey(nhgKey.to_string(), false, true); @@ -622,7 +775,10 @@ bool Srv6Orch::mySidVrfRequired(const sai_my_sid_entry_endpoint_behavior_t end_b if (end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_T || end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT4 || end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT6 || - end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT46) + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT46 || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UDT4 || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UDT6 || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UDT46) { return true; } @@ -634,6 +790,8 @@ bool Srv6Orch::mySidNextHopRequired(const sai_my_sid_entry_endpoint_behavior_t e if (end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_X || end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX4 || end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX6 || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UDX4 || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UDX6 || end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_ENCAPS || end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_ENCAPS_RED || end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_INSERT || @@ -861,6 +1019,491 @@ bool Srv6Orch::deleteMysidEntry(const string my_sid_string) return true; } +uint32_t Srv6Orch::getAggId(const NextHopGroupKey &nhg) +{ + SWSS_LOG_ENTER(); + static uint32_t g_agg_id = 1; + uint32_t agg_id; + + if (srv6_prefix_agg_id_table_.find(nhg) != srv6_prefix_agg_id_table_.end()) { + agg_id = srv6_prefix_agg_id_table_[nhg].prefix_agg_id; + SWSS_LOG_INFO("Agg id already exist, agg_id_key: %s, agg_id %u", nhg.to_string().c_str(), agg_id); + } else { + while (srv6_prefix_agg_id_set_.find(g_agg_id) != srv6_prefix_agg_id_set_.end()) { + SWSS_LOG_INFO("Agg id %d is busy, try next", g_agg_id); + g_agg_id++; + // restart with 1 if flip + if (g_agg_id == 0) { + 
g_agg_id = 1; + } + } + agg_id = g_agg_id; + srv6_prefix_agg_id_table_[nhg].prefix_agg_id = g_agg_id; + // initialize ref_count with 0, will be added in increasePrefixAggIdRefCount() later + srv6_prefix_agg_id_table_[nhg].ref_count = 0; + srv6_prefix_agg_id_set_.insert(g_agg_id); + SWSS_LOG_INFO("Agg id not exist, create agg_id_key: %s, agg_id %u", nhg.to_string().c_str(), agg_id); + } + + return agg_id; +} + +uint32_t Srv6Orch::getAggId(const std::string& index) +{ + SWSS_LOG_ENTER(); + static uint32_t g_agg_id = 1; + uint32_t agg_id; + + if (srv6_prefix_agg_id_table_for_nhg_.find(index) != srv6_prefix_agg_id_table_for_nhg_.end()) { + agg_id = srv6_prefix_agg_id_table_for_nhg_[index].prefix_agg_id; + SWSS_LOG_INFO("Agg id already exist, agg_id_key: %s, agg_id %u", index.c_str(), agg_id); + } else { + while (srv6_prefix_agg_id_set_.find(g_agg_id) != srv6_prefix_agg_id_set_.end()) { + SWSS_LOG_INFO("Agg id %d is busy, try next", g_agg_id); + g_agg_id++; + // restart with 1 if flip + if (g_agg_id == 0) { + g_agg_id = 1; + } + } + agg_id = g_agg_id; + srv6_prefix_agg_id_table_for_nhg_[index].prefix_agg_id = g_agg_id; + // initialize ref_count with 0, will be added in increasePrefixAggIdRefCount() later + srv6_prefix_agg_id_table_for_nhg_[index].ref_count = 0; + srv6_prefix_agg_id_set_.insert(g_agg_id); + SWSS_LOG_INFO("Agg id not exist, create agg_id_key: %s, agg_id %u", index.c_str(), agg_id); + } + + return agg_id; +} + +void Srv6Orch::deleteAggId(const NextHopGroupKey &nhg) +{ + SWSS_LOG_ENTER(); + uint32_t agg_id; + + if (srv6_prefix_agg_id_table_.find(nhg) == srv6_prefix_agg_id_table_.end()) { + return; + } + + agg_id = srv6_prefix_agg_id_table_[nhg].prefix_agg_id; + if (srv6_prefix_agg_id_table_[nhg].ref_count == 0) { + srv6_prefix_agg_id_table_.erase(nhg); + srv6_prefix_agg_id_set_.erase(agg_id); + SWSS_LOG_INFO("Delete Agg id %d, agg_id_key: %s", agg_id, nhg.to_string().c_str()); + } + else + { + SWSS_LOG_INFO("Referencing this prefix agg id %u : %u", agg_id, srv6_prefix_agg_id_table_[nhg].ref_count); + } +} + +void Srv6Orch::deleteAggId(const std::string& index) +{ + SWSS_LOG_ENTER(); + uint32_t agg_id; + + if (srv6_prefix_agg_id_table_for_nhg_.find(index) == srv6_prefix_agg_id_table_for_nhg_.end()) { + return; + } + + agg_id = srv6_prefix_agg_id_table_for_nhg_[index].prefix_agg_id; + if (srv6_prefix_agg_id_table_for_nhg_[index].ref_count == 0) { + srv6_prefix_agg_id_table_for_nhg_.erase(index); + srv6_prefix_agg_id_set_.erase(agg_id); + SWSS_LOG_INFO("Delete Agg id %d, agg_id_key: %s", agg_id, index.c_str()); + } + else + { + SWSS_LOG_INFO("Referencing this prefix agg id %u : %u", agg_id, srv6_prefix_agg_id_table_for_nhg_[index].ref_count); + } +} + +void Srv6Orch::increasePicContextIdRefCount(const std::string &index) +{ + SWSS_LOG_ENTER(); + if (srv6_pic_context_table_.find(index) == srv6_pic_context_table_.end()) + SWSS_LOG_ERROR("Unexpected refcount increase for context id %s", index.c_str()); + else + ++srv6_pic_context_table_[index].ref_count; +} + +void Srv6Orch::decreasePicContextIdRefCount(const std::string &index) +{ + SWSS_LOG_ENTER(); + if (srv6_pic_context_table_.find(index) == srv6_pic_context_table_.end()) + SWSS_LOG_ERROR("Unexpected refcount decrease for context id %s", index.c_str()); + else + --srv6_pic_context_table_[index].ref_count; +} + +void Srv6Orch::increasePrefixAggIdRefCount(const NextHopGroupKey &nhg) +{ + SWSS_LOG_ENTER(); + if (srv6_prefix_agg_id_table_.find(nhg) == srv6_prefix_agg_id_table_.end()) + { + SWSS_LOG_ERROR("Unexpected prefix agg refcount 
increase for nexthop %s", nhg.to_string().c_str()); + } + else + { + srv6_prefix_agg_id_table_[nhg].ref_count++; + } +} + +void Srv6Orch::increasePrefixAggIdRefCount(const std::string& index) +{ + SWSS_LOG_ENTER(); + if (srv6_prefix_agg_id_table_for_nhg_.find(index) == srv6_prefix_agg_id_table_for_nhg_.end()) + { + SWSS_LOG_ERROR("Unexpected prefix agg refcount increase for nexthop %s", index.c_str()); + } + else + { + ++srv6_prefix_agg_id_table_for_nhg_[index].ref_count; + } +} + +void Srv6Orch::decreasePrefixAggIdRefCount(const NextHopGroupKey &nhg) +{ + SWSS_LOG_ENTER(); + if (srv6_prefix_agg_id_table_.find(nhg) == srv6_prefix_agg_id_table_.end()) + { + SWSS_LOG_ERROR("Unexpected prefix agg refcount decrease for nexthop %s", nhg.to_string().c_str()); + } + else + { + srv6_prefix_agg_id_table_[nhg].ref_count--; + } +} + +void Srv6Orch::decreasePrefixAggIdRefCount(const std::string& index) +{ + SWSS_LOG_ENTER(); + if (srv6_prefix_agg_id_table_for_nhg_.find(index) == srv6_prefix_agg_id_table_for_nhg_.end()) + { + SWSS_LOG_ERROR("Unexpected prefix agg refcount decrease for nexthop %s", index.c_str()); + } + else + { + --srv6_prefix_agg_id_table_for_nhg_[index].ref_count; + } +} + +bool Srv6Orch::srv6P2pTunnelExists(const std::string &endpoint) +{ + if (srv6_p2p_tunnel_table_.find(endpoint) != srv6_p2p_tunnel_table_.end()) + { + return true; + } + return false; +} + +bool Srv6Orch::createSrv6P2pTunnel(const std::string &src, const std::string &endpoint) +{ + SWSS_LOG_ENTER(); + sai_status_t saistatus; + sai_object_id_t srv6_tunnel_map_id; + + sai_attribute_t tunnel_map_attr; + vector tunnel_map_attrs; + + if (srv6P2pTunnelExists(endpoint)) { + return true; + } + + // 0. create tunnel map + tunnel_map_attr.id = SAI_TUNNEL_MAP_ATTR_TYPE; + tunnel_map_attr.value.u32 = SAI_TUNNEL_MAP_TYPE_PREFIX_AGG_ID_TO_SRV6_VPN_SID; + tunnel_map_attrs.push_back(tunnel_map_attr); + + saistatus = sai_tunnel_api->create_tunnel_map(&srv6_tunnel_map_id, gSwitchId, + (uint32_t)tunnel_map_attrs.size(), tunnel_map_attrs.data()); + if (saistatus != SAI_STATUS_SUCCESS) { + SWSS_LOG_ERROR("Failed to create srv6 p2p tunnel map for src_ip: %s dst_ip: %s", src.c_str(), endpoint.c_str()); + return false; + } + + // 1. 
create tunnel + sai_object_id_t tunnel_id; + sai_attribute_t tunnel_attr; + vector tunnel_attrs; + sai_ip_address_t ipaddr; + + tunnel_attr.id = SAI_TUNNEL_ATTR_TYPE; + tunnel_attr.value.s32 = SAI_TUNNEL_TYPE_SRV6; + tunnel_attrs.push_back(tunnel_attr); + + IpAddress src_ip(src); + ipaddr.addr_family = SAI_IP_ADDR_FAMILY_IPV6; + memcpy(ipaddr.addr.ip6, src_ip.getV6Addr(), sizeof(ipaddr.addr.ip6)); + tunnel_attr.id = SAI_TUNNEL_ATTR_ENCAP_SRC_IP; + tunnel_attr.value.ipaddr = ipaddr; + tunnel_attrs.push_back(tunnel_attr); + + tunnel_attr.id = SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE; + tunnel_attr.value.oid = gUnderlayIfId; + tunnel_attrs.push_back(tunnel_attr); + + sai_object_id_t tunnel_map_list[1]; + tunnel_map_list[0] = srv6_tunnel_map_id; + tunnel_attr.id = SAI_TUNNEL_ATTR_ENCAP_MAPPERS; + tunnel_attr.value.objlist.count = 1; + tunnel_attr.value.objlist.list = tunnel_map_list; + tunnel_attrs.push_back(tunnel_attr); + + tunnel_attr.id = SAI_TUNNEL_ATTR_PEER_MODE; + tunnel_attr.value.u32 = SAI_TUNNEL_PEER_MODE_P2P; + tunnel_attrs.push_back(tunnel_attr); + + IpAddress dst_ip(endpoint); + ipaddr.addr_family = SAI_IP_ADDR_FAMILY_IPV6; + memcpy(ipaddr.addr.ip6, dst_ip.getV6Addr(), sizeof(ipaddr.addr.ip6)); + tunnel_attr.id = SAI_TUNNEL_ATTR_ENCAP_DST_IP; + tunnel_attr.value.ipaddr = ipaddr; + tunnel_attrs.push_back(tunnel_attr); + + saistatus = sai_tunnel_api->create_tunnel( + &tunnel_id, gSwitchId, (uint32_t)tunnel_attrs.size(), tunnel_attrs.data()); + if (saistatus != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create srv6 p2p tunnel for src ip: %s, dst ip: %s", + src.c_str(), endpoint.c_str()); + + sai_tunnel_api->remove_tunnel_map(srv6_tunnel_map_id); + return false; + } + + srv6_p2p_tunnel_table_[endpoint].tunnel_id = tunnel_id; + srv6_p2p_tunnel_table_[endpoint].tunnel_map_id = srv6_tunnel_map_id; + return true; +} + +bool Srv6Orch::deleteSrv6P2pTunnel(const std::string &endpoint) +{ + if (srv6_p2p_tunnel_table_.find(endpoint) == srv6_p2p_tunnel_table_.end()) + { + return true; + } + + if (srv6P2pTunnelNexthopSize(endpoint) || srv6P2pTunnelEntrySize(endpoint)) + { + SWSS_LOG_INFO("There are still SRv6 VPNs or Nexthops referencing this srv6 p2p tunnel object dst %s", endpoint.c_str()); + return true; + } + + sai_status_t status; + + // 0. remove tunnel + status = sai_tunnel_api->remove_tunnel(srv6_p2p_tunnel_table_[endpoint].tunnel_id); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove SRV6 p2p tunnel object for dst_ip: %s", endpoint.c_str()); + return false; + } + + // 1. 
remove tunnel map + status = sai_tunnel_api->remove_tunnel_map(srv6_p2p_tunnel_table_[endpoint].tunnel_map_id); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove SRV6 tunnel map object for dst_ip: %s", endpoint.c_str()); + return false; + } + + srv6_p2p_tunnel_table_.erase(endpoint); + return true; +} + +void Srv6Orch::srv6P2ptunnelUpdateNexthops(const NextHopKey &nhkey, bool insert) +{ + if (insert) + { + srv6_p2p_tunnel_table_[nhkey.ip_address.to_string()].nexthops.insert(nhkey); + } + else + { + srv6_p2p_tunnel_table_[nhkey.ip_address.to_string()].nexthops.erase(nhkey); + } +} + +size_t Srv6Orch::srv6P2pTunnelNexthopSize(const std::string &endpoint) +{ + return srv6_p2p_tunnel_table_[endpoint].nexthops.size(); +} + +void Srv6Orch::srv6P2pTunnelUpdateEntries(const Srv6TunnelMapEntryKey &tmek, bool insert) +{ + if (insert) + srv6_p2p_tunnel_table_[tmek.endpoint].tunnel_map_entries.insert(tmek); + else + srv6_p2p_tunnel_table_[tmek.endpoint].tunnel_map_entries.erase(tmek); +} + +size_t Srv6Orch::srv6P2pTunnelEntrySize(const std::string &endpoint) +{ + return srv6_p2p_tunnel_table_[endpoint].tunnel_map_entries.size(); +} + +bool Srv6Orch::createSrv6Vpns(const Srv6PicContextInfo &pci, const std::string &context_id) +{ + auto agg_id = getAggId(context_id); + for (size_t i = 0; i < pci.nexthops.size(); ++i) + { + if (!createSrv6Vpn(pci.nexthops[i], pci.sids[i], agg_id)) + { + for (size_t j = 0; j < i; ++j) + { + deleteSrv6Vpn(pci.nexthops[j], pci.sids[j], agg_id); + } + deleteAggId(context_id); + return false; + } + } + + increasePrefixAggIdRefCount(context_id); + + return true; +} + +bool Srv6Orch::createSrv6Vpn(const std::string &endpoint, const std::string &sid, const uint32_t prefix_agg_id) +{ + SWSS_LOG_ENTER(); + + sai_status_t status; + + Srv6TunnelMapEntryKey tmek; + tmek.endpoint = endpoint; + tmek.vpn_sid = sid; + tmek.prefix_agg_id = prefix_agg_id; + + if (srv6_tunnel_map_entry_table_.find(tmek) != srv6_tunnel_map_entry_table_.end()) + { + srv6_tunnel_map_entry_table_[tmek].ref_count++; + return true; + } + + if (srv6_p2p_tunnel_table_.find(endpoint) == srv6_p2p_tunnel_table_.end()) + { + SWSS_LOG_ERROR("Tunnel map for endpoint %s does not exist", endpoint.c_str()); + return false; + } + sai_object_id_t tunnel_map_id = srv6_p2p_tunnel_table_[endpoint].tunnel_map_id; + + // 1. 
create vpn tunnel_map entry + sai_attribute_t tunnel_map_entry_attr; + vector tunnel_map_entry_attrs; + sai_object_id_t tunnel_entry_id; + + tunnel_map_entry_attr.id = SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP_TYPE; + tunnel_map_entry_attr.value.u32 = SAI_TUNNEL_MAP_TYPE_PREFIX_AGG_ID_TO_SRV6_VPN_SID; + tunnel_map_entry_attrs.push_back(tunnel_map_entry_attr); + + tunnel_map_entry_attr.id = SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP; + tunnel_map_entry_attr.value.oid = tunnel_map_id; + tunnel_map_entry_attrs.push_back(tunnel_map_entry_attr); + + tunnel_map_entry_attr.id = SAI_TUNNEL_MAP_ENTRY_ATTR_PREFIX_AGG_ID_KEY; + tunnel_map_entry_attr.value.u32 = tmek.prefix_agg_id; + tunnel_map_entry_attrs.push_back(tunnel_map_entry_attr); + + IpAddress vpn_sid(tmek.vpn_sid); + tunnel_map_entry_attr.id = SAI_TUNNEL_MAP_ENTRY_ATTR_SRV6_VPN_SID_VALUE; + memcpy(tunnel_map_entry_attr.value.ip6, vpn_sid.getV6Addr(), sizeof(sai_ip6_t)); + tunnel_map_entry_attrs.push_back(tunnel_map_entry_attr); + + status = sai_tunnel_api->create_tunnel_map_entry(&tunnel_entry_id, gSwitchId, + (uint32_t)tunnel_map_entry_attrs.size(), + tunnel_map_entry_attrs.data()); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create vpn tunnel_map entry for vpn_sid: %s", tmek.vpn_sid.c_str()); + return false; + } + + // add reference for tunnel map entry + srv6_tunnel_map_entry_table_[tmek].tunnel_map_entry_id = tunnel_entry_id; + srv6_tunnel_map_entry_table_[tmek].ref_count = 1; + + srv6P2pTunnelUpdateEntries(tmek, true); + return true; +} + +bool Srv6Orch::deleteSrv6Vpns(const std::string &context_id) +{ + const auto &it = srv6_pic_context_table_.find(context_id); + if (it == srv6_pic_context_table_.end()) + { + SWSS_LOG_ERROR("Failed to find context id %s", context_id.c_str()); + return false; + } + + bool success = true; + auto agg_id = getAggId(context_id); + for (size_t i = 0; i < it->second.nexthops.size(); ++i) + { + if (!deleteSrv6Vpn(it->second.nexthops[i], it->second.sids[i], agg_id)) + { + success = false; + } + } + + if (success) + { + decreasePrefixAggIdRefCount(context_id); + } + deleteAggId(context_id); + + return success; +} + +bool Srv6Orch::deleteSrv6Vpn(const std::string &endpoint, const std::string &sid, const uint32_t prefix_agg_id) +{ + SWSS_LOG_ENTER(); + sai_status_t status; + + // 1. 
remove tunnel_map entry if need + sai_object_id_t tunnel_entry_id; + + Srv6TunnelMapEntryKey tmek; + tmek.endpoint = endpoint; + tmek.vpn_sid = sid; + tmek.prefix_agg_id = prefix_agg_id; + + if (srv6_tunnel_map_entry_table_.find(tmek) == srv6_tunnel_map_entry_table_.end()) + { + return true; + } + + srv6_tunnel_map_entry_table_[tmek].ref_count--; + if (srv6_tunnel_map_entry_table_[tmek].ref_count == 0) + { + tunnel_entry_id = srv6_tunnel_map_entry_table_[tmek].tunnel_map_entry_id; + status = sai_tunnel_api->remove_tunnel_map_entry(tunnel_entry_id); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove nexthop tunnel map entry (endpoint: %s, sid: %s, agg_id: %u)", + tmek.endpoint.c_str(), tmek.vpn_sid.c_str(), tmek.prefix_agg_id); + return false; + } + srv6_tunnel_map_entry_table_.erase(tmek); + + srv6P2pTunnelUpdateEntries(tmek, false); + if (!deleteSrv6P2pTunnel(tmek.endpoint)) + { + SWSS_LOG_ERROR("Failed to remove SRV6 p2p tunnel object for dst %s,", endpoint.c_str()); + return false; + } + } + else + { + SWSS_LOG_INFO("Nexthops referencing this tunnel map entry endpoint %s, vpn_sid %s, prefix_agg_id %u : %u", + tmek.endpoint.c_str(), + tmek.vpn_sid.c_str(), + tmek.prefix_agg_id, + srv6_tunnel_map_entry_table_[tmek].ref_count); + } + return true; +} + void Srv6Orch::doTaskMySidTable(const KeyOpFieldsValuesTuple & tuple) { SWSS_LOG_ENTER(); @@ -907,6 +1550,81 @@ void Srv6Orch::doTaskMySidTable(const KeyOpFieldsValuesTuple & tuple) } } +task_process_status Srv6Orch::doTaskPicContextTable(const KeyOpFieldsValuesTuple &tuple) +{ + SWSS_LOG_ENTER(); + string op = kfvOp(tuple); + string key = kfvKey(tuple); + const auto &it = srv6_pic_context_table_.find(key); + if (op == SET_COMMAND) + { + if (it != srv6_pic_context_table_.end()) + { + SWSS_LOG_ERROR("update is not allowed for pic context table"); + return task_duplicated; + } + Srv6PicContextInfo pci; + pci.ref_count = 0; + for (auto i : kfvFieldsValues(tuple)) + { + if (fvField(i) == "nexthop") + { + pci.nexthops = tokenize(fvValue(i), ','); + } + else if (fvField(i) == "vpn_sid") + { + pci.sids = tokenize(fvValue(i), ','); + } + } + if (pci.nexthops.size() != pci.sids.size()) + { + SWSS_LOG_ERROR("inconsistent number of endpoints(%zu) and vpn sids(%zu)", + pci.nexthops.size(), pci.sids.size()); + return task_failed; + } + + if (!createSrv6Vpns(pci ,key)) + { + SWSS_LOG_ERROR("Failed to create SRv6 VPNs for context id %s", key.c_str()); + return task_need_retry; + } + + srv6_pic_context_table_[key] = pci; + } + else if (op == DEL_COMMAND) + { + if (it == srv6_pic_context_table_.end()) + { + SWSS_LOG_INFO("Unable to find pic context entry for key %s", key.c_str()); + return task_ignore; + } + else if (it->second.ref_count != 0) + { + SWSS_LOG_INFO("Unable to delete context id %s, because it is referenced %u times", key.c_str(), it->second.ref_count); + return task_need_retry; + } + else if (!deleteSrv6Vpns(key)) + { + SWSS_LOG_ERROR("Failed to delete SRv6 VPNs for context id %s", key.c_str()); + return task_need_retry; + } + srv6_pic_context_table_.erase(it); + } + else + { + SWSS_LOG_ERROR("Unknown operation type %s", op.c_str()); + return task_ignore; + } + return task_success; +} + +bool Srv6Orch::contextIdExists(const std::string &context_id) +{ + if (srv6_pic_context_table_.find(context_id) == srv6_pic_context_table_.end()) + return false; + return true; +} + void Srv6Orch::doTask(Consumer &consumer) { SWSS_LOG_ENTER(); @@ -930,6 +1648,15 @@ void Srv6Orch::doTask(Consumer &consumer) { doTaskMySidTable(t); } + else 
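+        // Entries that report task_need_retry are intentionally kept in the
+        // consumer's pending queue (note the ++it/continue below) and are
+        // re-attempted on a later doTask() pass, e.g. once the SRv6 objects
+        // they reference have been created.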
if (table_name == APP_PIC_CONTEXT_TABLE_NAME) + { + status = doTaskPicContextTable(t); + if (status == task_need_retry) + { + ++it; + continue; + } + } else { SWSS_LOG_ERROR("Unknown table : %s",table_name.c_str()); diff --git a/orchagent/srv6orch.h b/orchagent/srv6orch.h index 872dfeaa5e..2654f7792e 100644 --- a/orchagent/srv6orch.h +++ b/orchagent/srv6orch.h @@ -46,10 +46,74 @@ struct MySidEntry string endAdjString; // Used for END.X, END.DX4, END.DX6 }; +struct Srv6TunnelMapEntryKey +{ + string endpoint; + string vpn_sid; + uint32_t prefix_agg_id; + + bool operator==(const Srv6TunnelMapEntryKey &o) const + { + return tie(endpoint, vpn_sid, prefix_agg_id) == + tie(o.endpoint, o.vpn_sid, o.prefix_agg_id); + } + + bool operator<(const Srv6TunnelMapEntryKey &o) const + { + return tie(endpoint, vpn_sid, prefix_agg_id) < + tie(o.endpoint, o.vpn_sid, o.prefix_agg_id); + } + + bool operator!=(const Srv6TunnelMapEntryKey &o) const + { + return !(*this == o); + } +}; + +struct Srv6TunnelMapEntryEntry +{ + sai_object_id_t tunnel_map_entry_id; + + // for sid remarking + sai_object_id_t inner_tunnel_map_id; + map inner_tunnel_map_entry_ids; + + uint32_t ref_count; +}; + +struct P2pTunnelEntry +{ + sai_object_id_t tunnel_id; + sai_object_id_t tunnel_map_id; + + set nexthops; + set tunnel_map_entries; +}; + +struct Srv6PrefixAggIdEntry +{ + uint32_t prefix_agg_id; + + uint32_t ref_count; +}; + +struct Srv6PicContextInfo +{ + vector nexthops; + vector sids; + uint32_t ref_count; +}; + typedef unordered_map SidTable; typedef unordered_map Srv6TunnelTable; typedef map Srv6NextHopTable; typedef unordered_map Srv6MySidTable; +typedef map Srv6P2pTunnelTable; +typedef map Srv6PrefixAggIdTable; +typedef map Srv6PrefixAggIdTableForNhg; +typedef set Srv6PrefixAggIdSet; +typedef map Srv6TunnelMapEntryTable; +typedef map Srv6PicContextTable; #define SID_LIST_DELIMITER ',' #define MY_SID_KEY_DELIMITER ':' @@ -62,7 +126,8 @@ class Srv6Orch : public Orch, public Observer m_switchOrch(switchOrch), m_neighOrch(neighOrch), m_sidTable(applDb, APP_SRV6_SID_LIST_TABLE_NAME), - m_mysidTable(applDb, APP_SRV6_MY_SID_TABLE_NAME) + m_mysidTable(applDb, APP_SRV6_MY_SID_TABLE_NAME), + m_piccontextTable(applDb, APP_PIC_CONTEXT_TABLE_NAME) { m_neighOrch->attach(this); } @@ -70,18 +135,33 @@ class Srv6Orch : public Orch, public Observer { m_neighOrch->detach(this); } + void increasePicContextIdRefCount(const std::string&); + void decreasePicContextIdRefCount(const std::string&); + void increasePrefixAggIdRefCount(const NextHopGroupKey&); + void increasePrefixAggIdRefCount(const std::string&); + void decreasePrefixAggIdRefCount(const NextHopGroupKey&); + void decreasePrefixAggIdRefCount(const std::string&); + uint32_t getAggId(const NextHopGroupKey &nhg); + uint32_t getAggId(const std::string& index); + void deleteAggId(const NextHopGroupKey &nhg); + void deleteAggId(const std::string& index); + bool createSrv6NexthopWithoutVpn(const NextHopKey &nhKey, sai_object_id_t &nexthop_id); bool srv6Nexthops(const NextHopGroupKey &nextHops, sai_object_id_t &next_hop_id); - bool removeSrv6Nexthops(const NextHopGroupKey &nhg); + bool removeSrv6NexthopWithoutVpn(const NextHopKey &nhKey); + bool removeSrv6Nexthops(const std::vector &nhgv); void update(SubjectType, void *); + bool contextIdExists(const std::string &context_id); private: void doTask(Consumer &consumer); task_process_status doTaskSidTable(const KeyOpFieldsValuesTuple &tuple); void doTaskMySidTable(const KeyOpFieldsValuesTuple &tuple); + task_process_status 
doTaskPicContextTable(const KeyOpFieldsValuesTuple &tuple); bool createUpdateSidList(const string seg_name, const string ips, const string sidlist_type); task_process_status deleteSidList(const string seg_name); bool createSrv6Tunnel(const string srv6_source); bool createSrv6Nexthop(const NextHopKey &nh); + bool deleteSrv6Nexthop(const NextHopKey &nh); bool srv6NexthopExists(const NextHopKey &nh); bool createUpdateMysidEntry(string my_sid_string, const string vrf, const string adj, const string end_action); bool deleteMysidEntry(const string my_sid_string); @@ -92,15 +172,35 @@ class Srv6Orch : public Orch, public Observer bool mySidNextHopRequired(const sai_my_sid_entry_endpoint_behavior_t end_behavior); void srv6TunnelUpdateNexthops(const string srv6_source, const NextHopKey nhkey, bool insert); size_t srv6TunnelNexthopSize(const string srv6_source); + bool sidListExists(const string &segment_name); + bool srv6P2pTunnelExists(const string &endpoint); + bool createSrv6P2pTunnel(const string &src, const string &endpoint); + bool deleteSrv6P2pTunnel(const string &endpoint); + void srv6P2ptunnelUpdateNexthops(const NextHopKey &nhkey, bool insert); + size_t srv6P2pTunnelNexthopSize(const string &endpoint); + void srv6P2pTunnelUpdateEntries(const Srv6TunnelMapEntryKey &tmek, bool insert); + size_t srv6P2pTunnelEntrySize(const string &endpoint); + bool createSrv6Vpn(const string &endpoint, const string &sid, const uint32_t prefix_agg_id); + bool createSrv6Vpns(const Srv6PicContextInfo &pci ,const std::string &context_id); + bool deleteSrv6Vpn(const string &endpoint, const string &sid, const uint32_t prefix_agg_id); + bool deleteSrv6Vpns(const std::string &context_id); void updateNeighbor(const NeighborUpdate& update); ProducerStateTable m_sidTable; ProducerStateTable m_mysidTable; + ProducerStateTable m_piccontextTable; SidTable sid_table_; Srv6TunnelTable srv6_tunnel_table_; Srv6NextHopTable srv6_nexthop_table_; Srv6MySidTable srv6_my_sid_table_; + Srv6P2pTunnelTable srv6_p2p_tunnel_table_; + Srv6PrefixAggIdTable srv6_prefix_agg_id_table_; + Srv6PrefixAggIdTableForNhg srv6_prefix_agg_id_table_for_nhg_; + Srv6PrefixAggIdSet srv6_prefix_agg_id_set_; + Srv6TunnelMapEntryTable srv6_tunnel_map_entry_table_; + Srv6PicContextTable srv6_pic_context_table_; + VRFOrch *m_vrfOrch; SwitchOrch *m_switchOrch; NeighOrch *m_neighOrch; diff --git a/orchagent/stporch.cpp b/orchagent/stporch.cpp index 38f502f321..d380609f39 100644 --- a/orchagent/stporch.cpp +++ b/orchagent/stporch.cpp @@ -20,6 +20,7 @@ StpOrch::StpOrch(DBConnector * db, DBConnector * stateDb, vector &tableN sai_attribute_t attr; sai_status_t status; + bool ret = false; m_stpTable = unique_ptr(new Table(stateDb, STATE_STP_TABLE_NAME)); @@ -28,12 +29,13 @@ StpOrch::StpOrch(DBConnector * db, DBConnector * stateDb, vector &tableN attrs.push_back(attr); status = sai_switch_api->get_switch_attribute(gSwitchId, (uint32_t)attrs.size(), attrs.data()); - if (status != SAI_STATUS_SUCCESS) + if (status == SAI_STATUS_SUCCESS) { - throw runtime_error("StpOrch initialization failure"); + m_defaultStpId = attrs[0].value.oid; + ret = true; } - - m_defaultStpId = attrs[0].value.oid; + + SWSS_LOG_NOTICE("StpOrch initialization %s", (ret == true)?"success":"failure"); }; diff --git a/orchagent/switchorch.cpp b/orchagent/switchorch.cpp index 64442c96af..5aeed420f2 100644 --- a/orchagent/switchorch.cpp +++ b/orchagent/switchorch.cpp @@ -1115,8 +1115,22 @@ void SwitchOrch::onSwitchAsicSdkHealthEvent(sai_object_id_t switch_id, const string &severity_str = 
switch_asic_sdk_health_event_severity_reverse_map.at(severity); const string &category_str = switch_asic_sdk_health_event_category_reverse_map.at(category); string description_str; - const std::time_t &t = (std::time_t)timestamp.tv_sec; + std::time_t t = (std::time_t)timestamp.tv_sec; + const std::time_t now = std::time(0); + const double year_in_seconds = 86400 * 365; stringstream time_ss; + + /* + * In case vendor SAI passed a very large timestamp, put_time can cause segment fault which can not be caught by try/catch infra + * We check the difference between the timestamp from SAI and the current time and force to use current time if the gap is too large + * By doing so, we can avoid the segment fault + */ + if (difftime(t, now) > year_in_seconds) + { + SWSS_LOG_ERROR("Invalid timestamp second %" PRIx64 " in received ASIC/SDK health event, reset to current time", timestamp.tv_sec); + t = now; + } + time_ss << std::put_time(std::localtime(&t), "%Y-%m-%d %H:%M:%S"); switch (data.data_type) diff --git a/orchagent/swssnet.h b/orchagent/swssnet.h index 82b5b6f94f..8084b7fb4e 100644 --- a/orchagent/swssnet.h +++ b/orchagent/swssnet.h @@ -21,6 +21,7 @@ inline static sai_ip_address_t& copy(sai_ip_address_t& dst, const IpAddress& src switch(sip.family) { case AF_INET: + memset((void*)&dst.addr, 0, sizeof(dst.addr)); dst.addr_family = SAI_IP_ADDR_FAMILY_IPV4; dst.addr.ip4 = sip.ip_addr.ipv4_addr; break; @@ -41,6 +42,7 @@ inline static sai_ip_prefix_t& copy(sai_ip_prefix_t& dst, const IpPrefix& src) switch(ia.family) { case AF_INET: + memset((void*)&dst, 0, sizeof(dst)); dst.addr_family = SAI_IP_ADDR_FAMILY_IPV4; dst.addr.ip4 = ia.ip_addr.ipv4_addr; dst.mask.ip4 = ma.ip_addr.ipv4_addr; @@ -62,6 +64,7 @@ inline static sai_ip_prefix_t& copy(sai_ip_prefix_t& dst, const IpAddress& src) switch(sip.family) { case AF_INET: + memset((void*)&dst, 0, sizeof(dst)); dst.addr_family = SAI_IP_ADDR_FAMILY_IPV4; dst.addr.ip4 = sip.ip_addr.ipv4_addr; dst.mask.ip4 = 0xFFFFFFFF; diff --git a/orchagent/vxlanorch.cpp b/orchagent/vxlanorch.cpp index 05a2d3e603..3e489b7fb1 100644 --- a/orchagent/vxlanorch.cpp +++ b/orchagent/vxlanorch.cpp @@ -1329,10 +1329,6 @@ VxlanTunnelOrch::createNextHopTunnel(string tunnelName, IpAddress& ipAddr, return SAI_NULL_OBJECT_ID; } - SWSS_LOG_NOTICE("NH tunnel create for %s, ip %s, mac %s, vni %d", - tunnelName.c_str(), ipAddr.to_string().c_str(), - macAddress.to_string().c_str(), vni); - auto tunnel_obj = getVxlanTunnel(tunnelName); sai_object_id_t nh_id, tunnel_id = tunnel_obj->getTunnelId(); @@ -1342,6 +1338,10 @@ VxlanTunnelOrch::createNextHopTunnel(string tunnelName, IpAddress& ipAddr, return nh_id; } + SWSS_LOG_NOTICE("NH tunnel create for %s, ip %s, mac %s, vni %d", + tunnelName.c_str(), ipAddr.to_string().c_str(), + macAddress.to_string().c_str(), vni); + sai_ip_address_t host_ip; swss::copy(host_ip, ipAddr); @@ -2409,8 +2409,8 @@ bool EvpnRemoteVnip2pOrch::addOperation(const Request& request) } // SAI Call to add tunnel to the VLAN flood domain - - string tagging_mode = "untagged"; + // NOTE: does 'untagged' make the most sense here? + string tagging_mode = "untagged"; gPortsOrch->addVlanMember(vlanPort, tunnelPort, tagging_mode); SWSS_LOG_INFO("remote_vtep=%s vni=%d vlanid=%d ", @@ -2569,7 +2569,7 @@ bool EvpnRemoteVnip2mpOrch::addOperation(const Request& request) } // SAI Call to add tunnel to the VLAN flood domain - + // NOTE: does 'untagged' make the most sense here? 
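+    // Assumption behind the NOTE above: 'untagged' delivers decapsulated
+    // frames into the VLAN without an 802.1Q tag, which is what flood-domain
+    // membership of a remote VTEP normally needs; revisit if tagged delivery
+    // is ever required.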
string tagging_mode = "untagged"; gPortsOrch->addVlanMember(vlanPort, tunnelPort, tagging_mode, end_point_ip); diff --git a/orchagent/vxlanorch.h b/orchagent/vxlanorch.h index 695f7441e0..f53dca05f3 100644 --- a/orchagent/vxlanorch.h +++ b/orchagent/vxlanorch.h @@ -46,6 +46,7 @@ typedef enum #define MAX_VLAN_ID 4095 #define MAX_VNI_ID 16777215 +#define DEFAULT_TUNNEL_ENCAP_TTL 255 typedef enum { @@ -196,7 +197,7 @@ class VxlanTunnel bool deleteMapperHw(uint8_t mapper_list, tunnel_map_use_t map_src); bool createMapperHw(uint8_t mapper_list, tunnel_map_use_t map_src); - bool createTunnelHw(uint8_t mapper_list, tunnel_map_use_t map_src, bool with_term = true, sai_uint8_t encap_ttl=0); + bool createTunnelHw(uint8_t mapper_list, tunnel_map_use_t map_src, bool with_term = true, sai_uint8_t encap_ttl=DEFAULT_TUNNEL_ENCAP_TTL); bool deleteTunnelHw(uint8_t mapper_list, tunnel_map_use_t map_src, bool with_term = true); void deletePendingSIPTunnel(); void increment_spurious_imr_add(const std::string remote_vtep); @@ -299,7 +300,7 @@ class VxlanTunnelOrch : public Orch2 bool createVxlanTunnelMap(string tunnelName, tunnel_map_type_t mapType, uint32_t vni, - sai_object_id_t encap, sai_object_id_t decap, uint8_t encap_ttl=0); + sai_object_id_t encap, sai_object_id_t decap, uint8_t encap_ttl=DEFAULT_TUNNEL_ENCAP_TTL); bool removeVxlanTunnelMap(string tunnelName, uint32_t vni); diff --git a/portsyncd/linksync.cpp b/portsyncd/linksync.cpp index 66cdc4df5f..4c00d366e7 100644 --- a/portsyncd/linksync.cpp +++ b/portsyncd/linksync.cpp @@ -27,7 +27,6 @@ using namespace swss; #define VLAN_DRV_NAME "bridge" #define TEAM_DRV_NAME "team" -const string MGMT_PREFIX = "eth"; const string INTFS_PREFIX = "Ethernet"; const string LAG_PREFIX = "PortChannel"; @@ -38,57 +37,11 @@ extern string g_switchType; LinkSync::LinkSync(DBConnector *appl_db, DBConnector *state_db) : m_portTableProducer(appl_db, APP_PORT_TABLE_NAME), m_portTable(appl_db, APP_PORT_TABLE_NAME), - m_statePortTable(state_db, STATE_PORT_TABLE_NAME), - m_stateMgmtPortTable(state_db, STATE_MGMT_PORT_TABLE_NAME) + m_statePortTable(state_db, STATE_PORT_TABLE_NAME) { std::shared_ptr if_ni(if_nameindex(), if_freenameindex); struct if_nameindex *idx_p; - for (idx_p = if_ni.get(); - idx_p != NULL && idx_p->if_index != 0 && idx_p->if_name != NULL; - idx_p++) - { - string key = idx_p->if_name; - - /* Explicitly store management ports oper status into the state database. - * This piece of information is used by SNMP. */ - if (!key.compare(0, MGMT_PREFIX.length(), MGMT_PREFIX)) - { - ostringstream cmd; - string res; - cmd << "cat /sys/class/net/" << shellquote(key) << "/operstate"; - try - { - EXEC_WITH_ERROR_THROW(cmd.str(), res); - } - catch (...) 
- { - SWSS_LOG_WARN("Failed to get %s oper status", key.c_str()); - continue; - } - - /* Remove the trailing newline */ - if (res.length() >= 1 && res.at(res.length() - 1) == '\n') - { - res.erase(res.length() - 1); - /* The value of operstate will be either up or down */ - if (res != "up" && res != "down") - { - SWSS_LOG_WARN("Unknown %s oper status %s", - key.c_str(), res.c_str()); - } - FieldValueTuple fv("oper_status", res); - vector fvs; - fvs.push_back(fv); - - m_stateMgmtPortTable.set(key, fvs); - SWSS_LOG_INFO("Store %s oper status %s to state DB", - key.c_str(), res.c_str()); - } - continue; - } - } - if (!WarmStart::isWarmStart()) { /* See the comments for g_portSet in portsyncd.cpp */ @@ -168,8 +121,7 @@ void LinkSync::onMsg(int nlmsg_type, struct nl_object *obj) string key = rtnl_link_get_name(link); if (key.compare(0, INTFS_PREFIX.length(), INTFS_PREFIX) && - key.compare(0, LAG_PREFIX.length(), LAG_PREFIX) && - key.compare(0, MGMT_PREFIX.length(), MGMT_PREFIX)) + key.compare(0, LAG_PREFIX.length(), LAG_PREFIX)) { return; } @@ -188,24 +140,13 @@ void LinkSync::onMsg(int nlmsg_type, struct nl_object *obj) if (type) { - SWSS_LOG_NOTICE("nlmsg type:%d key:%s admin:%d oper:%d addr:%s ifindex:%d master:%d type:%s", - nlmsg_type, key.c_str(), admin, oper, addrStr, ifindex, master, type); + SWSS_LOG_NOTICE("nlmsg type:%d key:%s admin:%d oper:%d addr:%s ifindex:%d master:%d type:%s flags:%d", + nlmsg_type, key.c_str(), admin, oper, addrStr, ifindex, master, type, flags); } else { - SWSS_LOG_NOTICE("nlmsg type:%d key:%s admin:%d oper:%d addr:%s ifindex:%d master:%d", - nlmsg_type, key.c_str(), admin, oper, addrStr, ifindex, master); - } - - if (!key.compare(0, MGMT_PREFIX.length(), MGMT_PREFIX)) - { - FieldValueTuple fv("oper_status", oper ? "up" : "down"); - vector fvs; - fvs.push_back(fv); - m_stateMgmtPortTable.set(key, fvs); - SWSS_LOG_INFO("Store %s oper status %s to state DB", - key.c_str(), oper ? "up" : "down"); - return; + SWSS_LOG_NOTICE("nlmsg type:%d key:%s admin:%d oper:%d addr:%s ifindex:%d master:%d flags:%d", + nlmsg_type, key.c_str(), admin, oper, addrStr, ifindex, master, flags); } /* teamd instances are dealt in teamsyncd */ diff --git a/portsyncd/linksync.h b/portsyncd/linksync.h index d72e1ba124..5b31ed9b3c 100644 --- a/portsyncd/linksync.h +++ b/portsyncd/linksync.h @@ -20,7 +20,7 @@ class LinkSync : public NetMsg private: ProducerStateTable m_portTableProducer; - Table m_portTable, m_statePortTable, m_stateMgmtPortTable; + Table m_portTable, m_statePortTable; std::map m_ifindexNameMap; std::map m_ifindexOldNameMap; diff --git a/tests/conftest.py b/tests/conftest.py index abf9955cd7..e14d977ac9 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -462,7 +462,7 @@ def collect_coverage(self): self.runcmd('supervisorctl stop all') # Generate the converage info by lcov and copy to the host - cmd = f"docker exec {self.ctn.short_id} sh -c 'cd $BUILD_DIR; rm -rf **/.libs ./lib/libSaiRedis*; lcov -c --directory . --no-external --exclude tests --ignore-errors gcov,unused --output-file /tmp/coverage.info; sed -i \"s#SF:$BUILD_DIR/#SF:#\" /tmp/coverage.info; lcov_cobertura /tmp/coverage.info -o /tmp/coverage.xml'" + cmd = f"docker exec {self.ctn.short_id} sh -c 'cd $BUILD_DIR; rm -rf **/.libs ./lib/libSaiRedis*; lcov -c --directory . 
--no-external --exclude tests --ignore-errors gcov,unused --output-file /tmp/coverage.info && lcov --add-tracefile /tmp/coverage.info -o /tmp/coverage.info; sed -i \"s#SF:$BUILD_DIR/#SF:#\" /tmp/coverage.info; lcov_cobertura /tmp/coverage.info -o /tmp/coverage.xml'" subprocess.getstatusoutput(cmd) cmd = f"docker exec {self.ctn.short_id} sh -c 'cd $BUILD_DIR; find . -name *.gcda -type f -exec tar -rf /tmp/gcda.tar {{}} \\;'" subprocess.getstatusoutput(cmd) @@ -737,6 +737,21 @@ def stop_zebra(self): self.runcmd(['sh', '-c', 'pkill -9 zebra']) time.sleep(5) + def stop_teamsyncd(self): + self.runcmd(['sh', '-c', 'pkill -9 teamsyncd']) + + time.sleep(5) + + def start_teamsyncd(self): + self.runcmd(['sh', '-c', 'supervisorctl start teamsyncd']) + + time.sleep(5) + + def restart_teammgrd(self): + self.runcmd(['sh', '-c', 'supervisorctl restart teammgrd']) + + time.sleep(5) + # deps: warm_reboot def start_fpmsyncd(self): self.runcmd(['sh', '-c', 'supervisorctl start fpmsyncd']) @@ -1094,6 +1109,28 @@ def set_interface_status(self, interface, admin_status): tbl.set(interface, fvs) time.sleep(1) + def get_interface_oper_status(self, interface): + _, output = self.runcmd(f"ip --brief address show {interface}") + state = output.split()[1] + return state + + def get_interface_link_local_ipv6(self, interface, subnet=False): + """ + If subnet is True, the returned address will include the subnet length (e.g., fe80::aa:bbff:fecc:ddee/64) + """ + _, output = self.runcmd(f"ip --brief address show {interface}") + ipv6 = output.split()[2] + if not subnet: + slash = ipv6.find('/') + if slash > 0: + ipv6 = ipv6[0:slash] + return ipv6 + + def get_interface_mac(self, interface): + _, output = self.runcmd(f"ip --brief link show {interface}") + mac = output.split()[2] + return mac + # deps: acl, fdb_update, fdb, mirror_port_erspan, vlan, sub port intf def add_ip_address(self, interface, ip, vrf_name=None): if interface.startswith("PortChannel"): diff --git a/tests/dash/test_dash_vnet.py b/tests/dash/test_dash_vnet.py index fa8f457bb8..8409db7ce6 100644 --- a/tests/dash/test_dash_vnet.py +++ b/tests/dash/test_dash_vnet.py @@ -44,6 +44,7 @@ def test_appliance(self, dash_db: DashDB): direction_keys = dash_db.wait_for_asic_db_keys(ASIC_DIRECTION_LOOKUP_TABLE) dl_attrs = dash_db.get_asic_db_entry(ASIC_DIRECTION_LOOKUP_TABLE, direction_keys[0]) assert_sai_attribute_exists("SAI_DIRECTION_LOOKUP_ENTRY_ATTR_ACTION", dl_attrs, "SAI_DIRECTION_LOOKUP_ENTRY_ACTION_SET_OUTBOUND_DIRECTION") + assert_sai_attribute_exists("SAI_DIRECTION_LOOKUP_ENTRY_ATTR_DASH_ENI_MAC_OVERRIDE_TYPE", dl_attrs, "SAI_DASH_ENI_MAC_OVERRIDE_TYPE_DST_MAC") vip_keys = dash_db.wait_for_asic_db_keys(ASIC_VIP_TABLE) vip_attrs = dash_db.get_asic_db_entry(ASIC_VIP_TABLE, vip_keys[0]) diff --git a/tests/dvslib/dvs_vlan.py b/tests/dvslib/dvs_vlan.py index 418f3be666..303fa46c05 100644 --- a/tests/dvslib/dvs_vlan.py +++ b/tests/dvslib/dvs_vlan.py @@ -13,6 +13,11 @@ def create_vlan(self, vlanID): vlan_entry = {"vlanid": vlanID} self.config_db.create_entry("VLAN", vlan, vlan_entry) + def create_vlan_with_mac(self, vlanID, mac): + vlan = f"Vlan{vlanID}" + vlan_entry = {"vlanid": vlanID, "mac": mac} + self.config_db.create_entry("VLAN", vlan, vlan_entry) + def create_vlan_interface(self, vlanID): vlan = "Vlan{}".format(vlanID) vlan_intf_entry = {} diff --git a/tests/evpn_tunnel.py b/tests/evpn_tunnel.py index 346064e004..98dc45f2c7 100644 --- a/tests/evpn_tunnel.py +++ b/tests/evpn_tunnel.py @@ -569,6 +569,8 @@ def check_vxlan_sip_tunnel(self, dvs, tunnel_name, 
src_ip, vidlist, vnidlist, 'SAI_TUNNEL_ATTR_ENCAP_MAPPERS': encapstr, 'SAI_TUNNEL_ATTR_PEER_MODE': 'SAI_TUNNEL_PEER_MODE_P2MP', 'SAI_TUNNEL_ATTR_ENCAP_SRC_IP': src_ip, + 'SAI_TUNNEL_ATTR_ENCAP_TTL_MODE': 'SAI_TUNNEL_TTL_MODE_PIPE_MODEL', + 'SAI_TUNNEL_ATTR_ENCAP_TTL_VAL': '255', } ) @@ -675,6 +677,8 @@ def check_vxlan_dip_tunnel(self, dvs, vtep_name, src_ip, dip): 'SAI_TUNNEL_ATTR_ENCAP_MAPPERS': encapstr, 'SAI_TUNNEL_ATTR_ENCAP_SRC_IP': src_ip, 'SAI_TUNNEL_ATTR_ENCAP_DST_IP': dip, + 'SAI_TUNNEL_ATTR_ENCAP_TTL_MODE': 'SAI_TUNNEL_TTL_MODE_PIPE_MODEL', + 'SAI_TUNNEL_ATTR_ENCAP_TTL_VAL': '255', } ret = self.helper.get_key_with_attr(asic_db, self.ASIC_TUNNEL_TABLE, expected_tun_attributes) diff --git a/tests/mock_tests/Makefile.am b/tests/mock_tests/Makefile.am index 72f0fdf6ee..1abe482c4c 100644 --- a/tests/mock_tests/Makefile.am +++ b/tests/mock_tests/Makefile.am @@ -26,6 +26,7 @@ LDADD_GTEST = -L/usr/src/gtest tests_INCLUDES = -I $(FLEX_CTR_DIR) -I $(DEBUG_CTR_DIR) -I $(top_srcdir)/lib -I$(top_srcdir)/cfgmgr -I$(top_srcdir)/orchagent -I$(P4_ORCH_DIR)/tests -I$(DASH_ORCH_DIR) -I$(top_srcdir)/warmrestart tests_SOURCES = aclorch_ut.cpp \ + aclorch_rule_ut.cpp \ portsorch_ut.cpp \ routeorch_ut.cpp \ qosorch_ut.cpp \ @@ -227,8 +228,8 @@ tests_teammgrd_SOURCES = teammgrd/teammgr_ut.cpp \ tests_teammgrd_INCLUDES = $(tests_INCLUDES) -I$(top_srcdir)/cfgmgr -I$(top_srcdir)/lib tests_teammgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) tests_teammgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) $(tests_teammgrd_INCLUDES) -tests_teammgrd_LDADD = $(LDADD_GTEST) $(LDADD_SAI) -lnl-genl-3 -lhiredis -lhiredis \ - -lswsscommon -lswsscommon -lgtest -lgtest_main -lzmq -lnl-3 -lnl-route-3 -lpthread -lgmock -lgmock_main +tests_teammgrd_LDADD = $(LDADD_GTEST) $(LDADD_SAI) -ldl -lhiredis \ + -lswsscommon -lgtest -lgtest_main -lzmq -lpthread -lgmock -lgmock_main ## fpmsyncd unit tests diff --git a/tests/mock_tests/aclorch_rule_ut.cpp b/tests/mock_tests/aclorch_rule_ut.cpp new file mode 100644 index 0000000000..4e24a5651a --- /dev/null +++ b/tests/mock_tests/aclorch_rule_ut.cpp @@ -0,0 +1,296 @@ +#include "ut_helper.h" +#include "mock_orchagent_main.h" +#include "mock_sai_api.h" +#include "mock_orch_test.h" +#include "check.h" + +EXTERN_MOCK_FNS + +/* + This test provides a framework to mock create_acl_entry & remove_acl_entry API's +*/ +namespace aclorch_rule_test +{ + DEFINE_SAI_GENERIC_API_MOCK(acl, acl_entry); + /* To mock Redirect Action functionality */ + DEFINE_SAI_GENERIC_API_MOCK(next_hop, next_hop); + + using namespace ::testing; + using namespace std; + using namespace saimeta; + using namespace swss; + using namespace mock_orch_test; + + struct SaiMockState + { + /* Add extra attributes on demand */ + vector create_attrs; + sai_status_t create_status = SAI_STATUS_SUCCESS; + sai_status_t remove_status = SAI_STATUS_SUCCESS; + sai_object_id_t remove_oid; + sai_object_id_t create_oid; + + sai_status_t handleCreate(sai_object_id_t *sai, sai_object_id_t switch_id, uint32_t attr_count, const sai_attribute_t *attr_list) + { + *sai = create_oid; + create_attrs.clear(); + for (uint32_t i = 0; i < attr_count; ++i) + { + create_attrs.emplace_back(attr_list[i]); + } + return create_status; + } + + sai_status_t handleRemove(sai_object_id_t oid) + { + EXPECT_EQ(oid, remove_oid); + return remove_status; + } + }; + + struct AclOrchRuleTest : public MockOrchTest + { + unique_ptr aclMockState; + + void PostSetUp() override + { + 
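+            // The DEFINE_SAI_GENERIC_API_MOCK declarations above generate
+            // gmock-backed shims for the acl and next_hop SAI APIs; install
+            // them before any orch issues SAI calls, and unwind them in
+            // reverse order in PreTearDown().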
INIT_SAI_API_MOCK(acl);
+            INIT_SAI_API_MOCK(next_hop);
+            MockSaiApis();
+
+            aclMockState = make_unique<SaiMockState>();
+            /* Port init done is a prerequisite for AclOrch */
+            auto consumer = unique_ptr<Consumer>(new Consumer(
+                new swss::ConsumerStateTable(m_app_db.get(), APP_PORT_TABLE_NAME, 1, 1), gPortsOrch, APP_PORT_TABLE_NAME));
+            consumer->addToSync({ { "PortInitDone", EMPTY_PREFIX, { { "", "" } } } });
+            static_cast<Orch *>(gPortsOrch)->doTask(*consumer.get());
+        }
+
+        void PreTearDown() override
+        {
+            aclMockState.reset();
+            RestoreSaiApis();
+            DEINIT_SAI_API_MOCK(next_hop);
+            DEINIT_SAI_API_MOCK(acl);
+        }
+
+        void doAclTableTypeTask(const deque<KeyOpFieldsValuesTuple> &entries)
+        {
+            auto consumer = unique_ptr<Consumer>(new Consumer(
+                new swss::ConsumerStateTable(m_config_db.get(), CFG_ACL_TABLE_TYPE_TABLE_NAME, 1, 1),
+                gAclOrch, CFG_ACL_TABLE_TYPE_TABLE_NAME));
+            consumer->addToSync(entries);
+            static_cast<Orch *>(gAclOrch)->doTask(*consumer);
+        }
+
+        void doAclTableTask(const deque<KeyOpFieldsValuesTuple> &entries)
+        {
+            auto consumer = unique_ptr<Consumer>(new Consumer(
+                new swss::ConsumerStateTable(m_config_db.get(), CFG_ACL_TABLE_TABLE_NAME, 1, 1),
+                gAclOrch, CFG_ACL_TABLE_TABLE_NAME));
+            consumer->addToSync(entries);
+            static_cast<Orch *>(gAclOrch)->doTask(*consumer);
+        }
+
+        void doAclRuleTask(const deque<KeyOpFieldsValuesTuple> &entries)
+        {
+            auto consumer = unique_ptr<Consumer>(new Consumer(
+                new swss::ConsumerStateTable(m_config_db.get(), CFG_ACL_RULE_TABLE_NAME, 1, 1),
+                gAclOrch, CFG_ACL_RULE_TABLE_NAME));
+            consumer->addToSync(entries);
+            static_cast<Orch *>(gAclOrch)->doTask(*consumer);
+        }
+    };
+
+    struct AclRedirectActionTest : public AclOrchRuleTest
+    {
+        string acl_table_type = "TEST_ACL_TABLE_TYPE";
+        string acl_table = "TEST_ACL_TABLE";
+        string acl_rule = "TEST_ACL_RULE";
+
+        string mock_tunnel_name = "tunnel0";
+        string mock_invalid_tunnel_name = "tunnel1";
+        string mock_src_ip = "20.0.0.1";
+        string mock_nh_ip_str = "20.0.0.3";
+        string mock_invalid_nh_ip_str = "20.0.0.4";
+        sai_object_id_t nh_oid = 0x400000000064d;
+
+        void PostSetUp() override
+        {
+            AclOrchRuleTest::PostSetUp();
+
+            /* Create a tunnel */
+            auto consumer = unique_ptr<Consumer>(new Consumer(
+                new swss::ConsumerStateTable(m_app_db.get(), APP_VXLAN_TUNNEL_TABLE_NAME, 1, 1),
+                m_VxlanTunnelOrch, APP_VXLAN_TUNNEL_TABLE_NAME));
+
+            consumer->addToSync(
+                deque<KeyOpFieldsValuesTuple>(
+                    {
+                        {
+                            mock_tunnel_name,
+                            SET_COMMAND,
+                            {
+                                { "src_ip", mock_src_ip }
+                            }
+                        }
+                    }
+                ));
+            static_cast<Orch *>(m_VxlanTunnelOrch)->doTask(*consumer.get());
+
+            populateAclTable();
+            setDefaultMockState();
+        }
+
+        void PreTearDown() override
+        {
+            AclOrchRuleTest::PreTearDown();
+
+            /* Delete the Tunnel Object */
+            auto consumer = unique_ptr<Consumer>(new Consumer(
+                new swss::ConsumerStateTable(m_app_db.get(), APP_VXLAN_TUNNEL_TABLE_NAME, 1, 1),
+                m_VxlanTunnelOrch, APP_VXLAN_TUNNEL_TABLE_NAME));
+
+            consumer->addToSync(
+                deque<KeyOpFieldsValuesTuple>(
+                    {
+                        {
+                            mock_tunnel_name,
+                            DEL_COMMAND,
+                            { }
+                        }
+                    }
+                ));
+            static_cast<Orch *>(m_VxlanTunnelOrch)->doTask(*consumer.get());
+        }
+
+        void createTunnelNH(string ip)
+        {
+            IpAddress mock_nh_ip(ip);
+            ASSERT_EQ(m_VxlanTunnelOrch->createNextHopTunnel(mock_tunnel_name, mock_nh_ip, MacAddress()), nh_oid);
+        }
+
+        void populateAclTable()
+        {
+            /* Create a Table type and Table */
+            doAclTableTypeTask({
+                {
+                    acl_table_type,
+                    SET_COMMAND,
+                    {
+                        { ACL_TABLE_TYPE_MATCHES, MATCH_DST_IP },
+                        { ACL_TABLE_TYPE_ACTIONS, ACTION_REDIRECT_ACTION }
+                    }
+                }
+            });
+            doAclTableTask({
+                {
+                    acl_table,
+                    SET_COMMAND,
+                    {
+                        { ACL_TABLE_TYPE, acl_table_type },
+                        { ACL_TABLE_STAGE, STAGE_INGRESS },
+                    }
+                }
+            });
+        }
+
+        void addTunnelNhRule(string ip, string tunnel_name)
+        {
+            /* Create a rule */
+            doAclRuleTask({
+                {
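+                    // The rule key is "<table name>|<rule name>"; the redirect
+                    // action value below uses the "<ip>@<tunnel name>" form,
+                    // which AclOrch resolves to a VXLAN tunnel nexthop.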
acl_table + "|" + acl_rule, + SET_COMMAND, + { + { RULE_PRIORITY, "9999" }, + { MATCH_DST_IP, "10.0.0.1/24" }, + { ACTION_REDIRECT_ACTION, ip + "@" + tunnel_name } + } + } + }); + } + + void delTunnelNhRule() + { + doAclRuleTask( + { + { + acl_table + "|" + acl_rule, + DEL_COMMAND, + { } + } + }); + } + + void setDefaultMockState() + { + aclMockState->create_status = SAI_STATUS_SUCCESS; + aclMockState->remove_status = SAI_STATUS_SUCCESS; + aclMockState->create_oid = nh_oid; + aclMockState->remove_oid = nh_oid; + } + }; + + TEST_F(AclRedirectActionTest, TunnelNH) + { + EXPECT_CALL(*mock_sai_next_hop_api, create_next_hop).WillOnce(DoAll(SetArgPointee<0>(nh_oid), + Return(SAI_STATUS_SUCCESS) + )); + EXPECT_CALL(*mock_sai_acl_api, create_acl_entry).WillOnce(testing::Invoke(aclMockState.get(), &SaiMockState::handleCreate)); + addTunnelNhRule(mock_nh_ip_str, mock_tunnel_name); + + /* Verify SAI attributes and if the rule is created */ + SaiAttributeList attr_list(SAI_OBJECT_TYPE_ACL_ENTRY, vector({ + { "SAI_ACL_ENTRY_ATTR_TABLE_ID", sai_serialize_object_id(gAclOrch->getTableById(acl_table)) }, + { "SAI_ACL_ENTRY_ATTR_PRIORITY", "9999" }, + { "SAI_ACL_ENTRY_ATTR_ADMIN_STATE", "true" }, + { "SAI_ACL_ENTRY_ATTR_ACTION_COUNTER", "oid:0xfffffffffff"}, + { "SAI_ACL_ENTRY_ATTR_FIELD_DST_IP", "10.0.0.1&mask:255.255.255.0"}, + { "SAI_ACL_ENTRY_ATTR_ACTION_REDIRECT", sai_serialize_object_id(nh_oid) } + }), false); + vector skip_list = {false, false, false, true, false, false}; /* skip checking counter */ + ASSERT_TRUE(Check::AttrListSubset(SAI_OBJECT_TYPE_ACL_ENTRY, aclMockState->create_attrs, attr_list, skip_list)); + ASSERT_TRUE(gAclOrch->getAclRule(acl_table, acl_rule)); + + /* ACLRule is deleted along with Nexthop */ + EXPECT_CALL(*mock_sai_next_hop_api, remove_next_hop).Times(1).WillOnce(Return(SAI_STATUS_SUCCESS)); + EXPECT_CALL(*mock_sai_acl_api, remove_acl_entry).WillOnce(testing::Invoke(aclMockState.get(), &SaiMockState::handleRemove)); + delTunnelNhRule(); + ASSERT_FALSE(gAclOrch->getAclRule(acl_table, acl_rule)); + } + + TEST_F(AclRedirectActionTest, TunnelNH_ExistingNhObject) + { + EXPECT_CALL(*mock_sai_next_hop_api, create_next_hop).WillOnce(DoAll(SetArgPointee<0>(nh_oid), + Return(SAI_STATUS_SUCCESS) + )); + EXPECT_CALL(*mock_sai_acl_api, create_acl_entry).WillOnce(testing::Invoke(aclMockState.get(), &SaiMockState::handleCreate)); + createTunnelNH(mock_nh_ip_str); + addTunnelNhRule(mock_nh_ip_str, mock_tunnel_name); + ASSERT_TRUE(gAclOrch->getAclRule(acl_table, acl_rule)); + + /* ACL Rule is deleted but nexthop is not deleted */ + EXPECT_CALL(*mock_sai_acl_api, remove_acl_entry).WillOnce(testing::Invoke(aclMockState.get(), &SaiMockState::handleRemove)); + EXPECT_CALL(*mock_sai_next_hop_api, remove_next_hop).Times(0); + delTunnelNhRule(); + ASSERT_FALSE(gAclOrch->getAclRule(acl_table, acl_rule)); + } + + TEST_F(AclRedirectActionTest, TunnelNH_InvalidTunnel) + { + EXPECT_CALL(*mock_sai_acl_api, create_acl_entry).Times(0); + addTunnelNhRule(mock_nh_ip_str, mock_invalid_tunnel_name); + ASSERT_FALSE(gAclOrch->getAclRule(acl_table, acl_rule)); + } + + TEST_F(AclRedirectActionTest, TunnelNH_InvalidNextHop) + { + EXPECT_CALL(*mock_sai_next_hop_api, create_next_hop).WillOnce( + Return(SAI_STATUS_FAILURE) /* create next hop fails */ + ); + EXPECT_CALL(*mock_sai_acl_api, create_acl_entry).Times(0); + addTunnelNhRule(mock_invalid_nh_ip_str, mock_tunnel_name); + ASSERT_FALSE(gAclOrch->getAclRule(acl_table, acl_rule)); + } +} diff --git a/tests/mock_tests/check.h b/tests/mock_tests/check.h index 
d1b095562d..a13f2abd3c 100644
--- a/tests/mock_tests/check.h
+++ b/tests/mock_tests/check.h
@@ -42,40 +42,94 @@ struct Check
                     std::cerr << "Expected: " << meta->attridname << "\n";
                 }
             }
-            continue;
         }
-        const int MAX_BUF_SIZE = 0x4000;
-        std::string act_str;
-        std::string exp_str;
+            const sai_attribute_t* act = &act_attr_list[i];
+            const sai_attribute_t* exp = &exp_attr_list.get_attr_list()[i];
+            if (!Check::AttrValue(objecttype, id, act, exp))
+            {
+                return false;
+            }
+        }
+
+        return true;
+    }
+
+    static bool AttrValue(sai_object_type_t objecttype, sai_attr_id_t id, const sai_attribute_t* act, const sai_attribute_t* exp)
+    {
+        auto meta = sai_metadata_get_attr_metadata(objecttype, id);
+        assert(meta != nullptr);
+
+        const int MAX_BUF_SIZE = 0x4000;
+        std::vector<char> act_buf(MAX_BUF_SIZE);
+        std::vector<char> exp_buf(MAX_BUF_SIZE);
+
+        act_buf.reserve(MAX_BUF_SIZE);
+        exp_buf.reserve(MAX_BUF_SIZE);
+
+        auto act_len = sai_serialize_attribute_value(act_buf.data(), meta, &act->value);
+        auto exp_len = sai_serialize_attribute_value(exp_buf.data(), meta, &exp->value);
-        act_str.reserve(MAX_BUF_SIZE);
-        exp_str.reserve(MAX_BUF_SIZE);
+        assert(act_len < MAX_BUF_SIZE);
+        assert(exp_len < MAX_BUF_SIZE);
-        auto act_len = sai_serialize_attribute_value(&act_str[0], meta, &act_attr_list[i].value);
-        auto exp_len = sai_serialize_attribute_value(&exp_str[0], meta, &exp_attr_list.get_attr_list()[i].value);
+        act_buf.resize(act_len);
+        exp_buf.resize(exp_len);
-        assert(act_len < act_str.size());
-        assert(act_len < exp_str.size());
+        std::string act_str(act_buf.begin(), act_buf.end());
+        std::string exp_str(exp_buf.begin(), exp_buf.end());
+
+        if (act_len != exp_len)
+        {
+            std::cerr << "AttrValue length failed\n";
+            std::cerr << "Actual: " << act_len << "," << act_str << "\n";
+            std::cerr << "Expected: " << exp_len << "," << exp_str << "\n";
+            return false;
+        }
-        if (act_len != exp_len)
+        if (act_str != exp_str)
+        {
+            std::cerr << "AttrValue string failed\n";
+            std::cerr << "Actual: " << act_str << "\n";
+            std::cerr << "Expected: " << exp_str << "\n";
+            return false;
+        }
+        return true;
+    }
+
+    static bool AttrListSubset(sai_object_type_t objecttype, const std::vector<sai_attribute_t> &act_attr_list,
+                               saimeta::SaiAttributeList &exp_attr_list, const std::vector<bool> skip_check)
+    {
+        /*
+            Size of attributes should be equal and in the same order.
+            If the validation has to be skipped for certain attributes, populate skip_check.
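+            For example, the ACL tests validate six attributes but skip the
+            unpredictable ACTION_COUNTER oid in position 4 by passing
+            {false, false, false, true, false, false}.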
+ */ + if (act_attr_list.size() != exp_attr_list.get_attr_count()) + { + std::cerr << "AttrListSubset size mismatch\n"; + return false; + } + if (act_attr_list.size() != skip_check.size()) + { + std::cerr << "AttrListSubset size mismatch\n"; + return false; + } + + for (uint32_t i = 0; i < exp_attr_list.get_attr_count(); ++i) + { + if (skip_check[i]) { - std::cerr << "AttrListEq failed\n"; - std::cerr << "Actual: " << act_str << "\n"; - std::cerr << "Expected: " << exp_str << "\n"; - return false; + continue; } - - if (act_str != exp_str) + sai_attr_id_t id = exp_attr_list.get_attr_list()[i].id; + const sai_attribute_t* act = &act_attr_list[i]; + const sai_attribute_t* exp = &exp_attr_list.get_attr_list()[i]; + if (!Check::AttrValue(objecttype, id, act, exp)) { - std::cerr << "AttrListEq failed\n"; - std::cerr << "Actual: " << act_str << "\n"; - std::cerr << "Expected: " << exp_str << "\n"; return false; } } - return true; } }; diff --git a/tests/mock_tests/flexcounter_ut.cpp b/tests/mock_tests/flexcounter_ut.cpp index fa3b62e795..22e3bdab65 100644 --- a/tests/mock_tests/flexcounter_ut.cpp +++ b/tests/mock_tests/flexcounter_ut.cpp @@ -111,6 +111,10 @@ namespace flexcounter_test } else { + if (flexCounterGroupParam->bulk_chunk_size.list != nullptr || flexCounterGroupParam->bulk_chunk_size_per_prefix.list != nullptr) + { + return SAI_STATUS_SUCCESS; + } mockFlexCounterGroupTable->del(key); } @@ -824,6 +828,47 @@ namespace flexcounter_test consumer->addToSync(entries); entries.clear(); static_cast(gBufferOrch)->doTask(); + + if (!gTraditionalFlexCounter) + { + // Verify bulk chunk size fields which can be verified in any combination of parameters. + // We verify it here just for convenience. + consumer = dynamic_cast(flexCounterOrch->getExecutor(CFG_FLEX_COUNTER_TABLE_NAME)); + + entries.push_back({"PORT", "SET", { + {"FLEX_COUNTER_STATUS", "enable"}, + {"BULK_CHUNK_SIZE", "64"} + }}); + consumer->addToSync(entries); + entries.clear(); + static_cast(flexCounterOrch)->doTask(); + ASSERT_TRUE(flexCounterOrch->m_groupsWithBulkChunkSize.find("PORT") != flexCounterOrch->m_groupsWithBulkChunkSize.end()); + + entries.push_back({"PORT", "SET", { + {"FLEX_COUNTER_STATUS", "enable"} + }}); + consumer->addToSync(entries); + entries.clear(); + static_cast(flexCounterOrch)->doTask(); + ASSERT_EQ(flexCounterOrch->m_groupsWithBulkChunkSize.find("PORT"), flexCounterOrch->m_groupsWithBulkChunkSize.end()); + + entries.push_back({"PORT", "SET", { + {"FLEX_COUNTER_STATUS", "enable"}, + {"BULK_CHUNK_SIZE_PER_PREFIX", "SAI_PORT_STAT_IF_OUT_QLEN:0;SAI_PORT_STAT_IF_IN_FEC:32"} + }}); + consumer->addToSync(entries); + entries.clear(); + static_cast(flexCounterOrch)->doTask(); + ASSERT_TRUE(flexCounterOrch->m_groupsWithBulkChunkSize.find("PORT") != flexCounterOrch->m_groupsWithBulkChunkSize.end()); + + entries.push_back({"PORT", "SET", { + {"FLEX_COUNTER_STATUS", "enable"} + }}); + consumer->addToSync(entries); + entries.clear(); + static_cast(flexCounterOrch)->doTask(); + ASSERT_EQ(flexCounterOrch->m_groupsWithBulkChunkSize.find("PORT"), flexCounterOrch->m_groupsWithBulkChunkSize.end()); + } } // Remove buffer pools diff --git a/tests/mock_tests/fpmsyncd/test_routesync.cpp b/tests/mock_tests/fpmsyncd/test_routesync.cpp index b1c23aca85..147900a48e 100644 --- a/tests/mock_tests/fpmsyncd/test_routesync.cpp +++ b/tests/mock_tests/fpmsyncd/test_routesync.cpp @@ -1,5 +1,5 @@ #include "redisutility.h" - +#include "ut_helpers_fpmsyncd.h" #include #include #include "mock_table.h" @@ -7,13 +7,25 @@ #include 
"fpmsyncd/routesync.h" #undef private +#include +#include +#include +#include +#include + +#include + using namespace swss; +using namespace testing; + #define MAX_PAYLOAD 1024 using ::testing::_; int rt_build_ret = 0; bool nlmsg_alloc_ret = true; +#pragma GCC diagnostic ignored "-Wcast-align" + class MockRouteSync : public RouteSync { public: @@ -28,6 +40,7 @@ class MockRouteSync : public RouteSync rtattr *[], std::string&, std::string& , std::string&, std::string&), (override)); + MOCK_METHOD(bool, getIfName, (int, char *, size_t), (override)); }; class MockFpm : public FpmInterface { @@ -69,6 +82,10 @@ class FpmSyncdResponseTest : public ::testing::Test RouteSync m_routeSync{m_pipeline.get()}; MockFpm m_mockFpm{&m_routeSync}; MockRouteSync m_mockRouteSync{m_pipeline.get()}; + + const char* test_gateway = "192.168.1.1"; + const char* test_gateway_ = "192.168.1.2"; + const char* test_gateway__ = "192.168.1.3"; }; TEST_F(FpmSyncdResponseTest, RouteResponseFeedbackV4) @@ -224,6 +241,7 @@ TEST_F(FpmSyncdResponseTest, testEvpn) return true; }); m_mockRouteSync.onMsgRaw(nlh); + vector keys; vector fieldValues; app_route_table.getKeys(keys); @@ -237,15 +255,652 @@ TEST_F(FpmSyncdResponseTest, testEvpn) TEST_F(FpmSyncdResponseTest, testSendOffloadReply) { - rt_build_ret = 1; rtnl_route* routeObject{}; - ASSERT_EQ(m_routeSync.sendOffloadReply(routeObject), false); rt_build_ret = 0; nlmsg_alloc_ret = false; ASSERT_EQ(m_routeSync.sendOffloadReply(routeObject), false); nlmsg_alloc_ret = true; +} + +struct nlmsghdr* createNewNextHopMsgHdr(int32_t ifindex, const char* gateway, uint32_t id, unsigned char nh_family=AF_INET) { + struct nlmsghdr *nlh = (struct nlmsghdr *)malloc(NLMSG_SPACE(MAX_PAYLOAD)); + memset(nlh, 0, NLMSG_SPACE(MAX_PAYLOAD)); + + // Set header + nlh->nlmsg_type = RTM_NEWNEXTHOP; + nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_REPLACE; + nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nhmsg)); + + // Set nhmsg + struct nhmsg *nhm = (struct nhmsg *)NLMSG_DATA(nlh); + nhm->nh_family = nh_family; + + // Add NHA_ID + struct rtattr *rta = (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len)); + rta->rta_type = NHA_ID; + rta->rta_len = RTA_LENGTH(sizeof(uint32_t)); + *(uint32_t *)RTA_DATA(rta) = id; + nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len); + + // Add NHA_OIF + rta = (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len)); + rta->rta_type = NHA_OIF; + rta->rta_len = RTA_LENGTH(sizeof(int32_t)); + *(int32_t *)RTA_DATA(rta) = ifindex; + nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len); + + // Add NHA_GATEWAY + rta = (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len)); + rta->rta_type = NHA_GATEWAY; + if (nh_family == AF_INET6) + { + struct in6_addr gw_addr6; + inet_pton(AF_INET6, gateway, &gw_addr6); + rta->rta_len = RTA_LENGTH(sizeof(struct in6_addr)); + memcpy(RTA_DATA(rta), &gw_addr6, sizeof(struct in6_addr)); + } + else + { + struct in_addr gw_addr; + inet_pton(AF_INET, gateway, &gw_addr); + rta->rta_len = RTA_LENGTH(sizeof(struct in_addr)); + memcpy(RTA_DATA(rta), &gw_addr, sizeof(struct in_addr)); + } + nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len); + + return nlh; +} + +TEST_F(FpmSyncdResponseTest, TestNoNHAId) +{ + struct nlmsghdr *nlh = (struct nlmsghdr *)malloc(NLMSG_SPACE(MAX_PAYLOAD)); + memset(nlh, 0, NLMSG_SPACE(MAX_PAYLOAD)); + + nlh->nlmsg_type = RTM_NEWNEXTHOP; + nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_REPLACE; + nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct 
nhmsg)); + struct nhmsg *nhm = (struct nhmsg *)NLMSG_DATA(nlh); + nhm->nh_family = AF_INET; + + EXPECT_CALL(m_mockRouteSync, getIfName(_, _, _)) + .Times(0); + + m_mockRouteSync.onNextHopMsg(nlh, 0); + + free(nlh); +} + +TEST_F(FpmSyncdResponseTest, TestNextHopAdd) +{ + uint32_t test_id = 10; + int32_t test_ifindex = 5; + + struct nlmsghdr* nlh = createNewNextHopMsgHdr(test_ifindex, test_gateway, test_id); + int expected_length = (int)(nlh->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg))); + + EXPECT_CALL(m_mockRouteSync, getIfName(test_ifindex, _, _)) + .WillOnce(DoAll( + [](int32_t, char* ifname, size_t size) { + strncpy(ifname, "Ethernet1", size); + ifname[size-1] = '\0'; + }, + Return(true) + )); + + m_mockRouteSync.onNextHopMsg(nlh, expected_length); + + auto it = m_mockRouteSync.m_nh_groups.find(test_id); + ASSERT_NE(it, m_mockRouteSync.m_nh_groups.end()) << "Failed to add new nexthop"; + + free(nlh); +} + +TEST_F(FpmSyncdResponseTest, TestIPv6NextHopAdd) +{ + uint32_t test_id = 20; + const char* test_gateway = "2001:db8::1"; + int32_t test_ifindex = 7; + + struct nlmsghdr* nlh = createNewNextHopMsgHdr(test_ifindex, test_gateway, test_id, AF_INET6); + int expected_length = (int)(nlh->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg))); + + EXPECT_CALL(m_mockRouteSync, getIfName(test_ifindex, _, _)) + .WillOnce(DoAll( + [](int32_t, char* ifname, size_t size) { + strncpy(ifname, "Ethernet2", size); + ifname[size-1] = '\0'; + }, + Return(true) + )); + + m_mockRouteSync.onNextHopMsg(nlh, expected_length); + + Table nexthop_group_table(m_db.get(), APP_NEXTHOP_GROUP_TABLE_NAME); + + vector fieldValues; + string key = to_string(test_id); + nexthop_group_table.get(key, fieldValues); + + // onNextHopMsg only updates m_nh_groups unless the nhg is marked as installed + ASSERT_TRUE(fieldValues.empty()); + + // Update the nexthop group to mark it as installed and write to DB + m_mockRouteSync.installNextHopGroup(test_id); + nexthop_group_table.get(key, fieldValues); + + string nexthop, ifname; + for (const auto& fv : fieldValues) { + if (fvField(fv) == "nexthop") { + nexthop = fvValue(fv); + } else if (fvField(fv) == "ifname") { + ifname = fvValue(fv); + } + } + + EXPECT_EQ(nexthop, test_gateway); + EXPECT_EQ(ifname, "Ethernet2"); + + free(nlh); +} + + +TEST_F(FpmSyncdResponseTest, TestGetIfNameFailure) +{ + uint32_t test_id = 22; + int32_t test_ifindex = 9; + + struct nlmsghdr* nlh = createNewNextHopMsgHdr(test_ifindex, test_gateway, test_id); + int expected_length = (int)(nlh->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg))); + + EXPECT_CALL(m_mockRouteSync, getIfName(test_ifindex, _, _)) + .WillOnce(Return(false)); + + m_mockRouteSync.onNextHopMsg(nlh, expected_length); + + auto it = m_mockRouteSync.m_nh_groups.find(test_id); + ASSERT_NE(it, m_mockRouteSync.m_nh_groups.end()); + EXPECT_EQ(it->second.intf, "unknown"); + + free(nlh); +} +TEST_F(FpmSyncdResponseTest, TestSkipSpecialInterfaces) +{ + uint32_t test_id = 11; + int32_t test_ifindex = 6; + + EXPECT_CALL(m_mockRouteSync, getIfName(test_ifindex, _, _)) + .WillOnce(DoAll( + [](int32_t ifidx, char* ifname, size_t size) { + strncpy(ifname, "eth0", size); + }, + Return(true) + )); + + struct nlmsghdr* nlh = createNewNextHopMsgHdr(test_ifindex, test_gateway, test_id); + int expected_length = (int)(nlh->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg))); + + m_mockRouteSync.onNextHopMsg(nlh, expected_length); + + auto it = m_mockRouteSync.m_nh_groups.find(test_id); + EXPECT_EQ(it, m_mockRouteSync.m_nh_groups.end()) << "Should skip eth0 interface"; + + 
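+    // eth0 is a management interface that RouteSync deliberately ignores,
+    // so no nexthop group entry should have been created for it.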
free(nlh); +} + +TEST_F(FpmSyncdResponseTest, TestNextHopGroupKeyString) +{ + EXPECT_EQ(m_mockRouteSync.getNextHopGroupKeyAsString(1), "1"); + EXPECT_EQ(m_mockRouteSync.getNextHopGroupKeyAsString(1234), "1234"); +} + +TEST_F(FpmSyncdResponseTest, TestGetNextHopGroupFields) +{ + // Test single next hop case + { + NextHopGroup nhg(1, test_gateway, "Ethernet0"); + m_mockRouteSync.m_nh_groups.insert({1, nhg}); + + string nexthops, ifnames, weights; + m_mockRouteSync.getNextHopGroupFields(nhg, nexthops, ifnames, weights); + + EXPECT_EQ(nexthops, test_gateway); + EXPECT_EQ(ifnames, "Ethernet0"); + EXPECT_TRUE(weights.empty()); + } + + // Test multiple next hops with weights + { + // Create the component next hops first + NextHopGroup nhg1(1, test_gateway, "Ethernet0"); + NextHopGroup nhg2(2, test_gateway_, "Ethernet1"); + m_mockRouteSync.m_nh_groups.insert({1, nhg1}); + m_mockRouteSync.m_nh_groups.insert({2, nhg2}); + + // Create the group with multiple next hops + vector> group_members; + group_members.push_back(make_pair(1, 1)); // id=1, weight=1 + group_members.push_back(make_pair(2, 2)); // id=2, weight=2 + + NextHopGroup nhg(3, group_members); + m_mockRouteSync.m_nh_groups.insert({3, nhg}); + + string nexthops, ifnames, weights; + m_mockRouteSync.getNextHopGroupFields(nhg, nexthops, ifnames, weights); + + EXPECT_EQ(nexthops, "192.168.1.1,192.168.1.2"); + EXPECT_EQ(ifnames, "Ethernet0,Ethernet1"); + EXPECT_EQ(weights, "1,2"); + } + + // Test IPv6 default case + { + NextHopGroup nhg(4, "", "Ethernet0"); + m_mockRouteSync.m_nh_groups.insert({4, nhg}); + + string nexthops, ifnames, weights; + m_mockRouteSync.getNextHopGroupFields(nhg, nexthops, ifnames, weights, AF_INET6); + + EXPECT_EQ(nexthops, "::"); + EXPECT_EQ(ifnames, "Ethernet0"); + EXPECT_TRUE(weights.empty()); + } + + // Both empty + { + NextHopGroup nhg(5, "", ""); + string nexthops, ifnames, weights; + m_mockRouteSync.getNextHopGroupFields(nhg, nexthops, ifnames, weights, AF_INET); + + EXPECT_EQ(nexthops, "0.0.0.0"); + EXPECT_TRUE(ifnames.empty()); + EXPECT_TRUE(weights.empty()); + } +} + +TEST_F(FpmSyncdResponseTest, TestUpdateNextHopGroupDb) +{ + Table nexthop_group_table(m_db.get(), APP_NEXTHOP_GROUP_TABLE_NAME); + + // Test single next hop group + { + NextHopGroup nhg(1, test_gateway, "Ethernet0"); + m_mockRouteSync.updateNextHopGroupDb(nhg); + + vector fieldValues; + nexthop_group_table.get("1", fieldValues); + + EXPECT_EQ(fieldValues.size(), 2); + EXPECT_EQ(fvField(fieldValues[0]), "nexthop"); + EXPECT_EQ(fvValue(fieldValues[0]), test_gateway); + EXPECT_EQ(fvField(fieldValues[1]), "ifname"); + EXPECT_EQ(fvValue(fieldValues[1]), "Ethernet0"); + } + + // Test group with multiple next hops + { + vector> group_members; + group_members.push_back(make_pair(1, 1)); + group_members.push_back(make_pair(2, 2)); + + NextHopGroup nhg1(1, test_gateway, "Ethernet0"); + NextHopGroup nhg2(2, test_gateway_, "Ethernet1"); + NextHopGroup group(3, group_members); + + m_mockRouteSync.m_nh_groups.insert({1, nhg1}); + m_mockRouteSync.m_nh_groups.insert({2, nhg2}); + m_mockRouteSync.m_nh_groups.insert({3, group}); + + m_mockRouteSync.installNextHopGroup(3); + + auto it = m_mockRouteSync.m_nh_groups.find(3); + ASSERT_NE(it, m_mockRouteSync.m_nh_groups.end()); + EXPECT_TRUE(it->second.installed); + vector fieldValues; + nexthop_group_table.get("3", fieldValues); + EXPECT_EQ(fieldValues.size(), 3); + EXPECT_EQ(fvField(fieldValues[0]), "nexthop"); + EXPECT_EQ(fvValue(fieldValues[0]), "192.168.1.1,192.168.1.2"); + EXPECT_EQ(fvField(fieldValues[1]), 
"ifname"); + EXPECT_EQ(fvValue(fieldValues[1]), "Ethernet0,Ethernet1"); + EXPECT_EQ(fvField(fieldValues[2]), "weight"); + EXPECT_EQ(fvValue(fieldValues[2]), "1,2"); + } + + // Empty nexthop (default route case) + { + NextHopGroup nhg(4, "", "Ethernet0"); + m_mockRouteSync.updateNextHopGroupDb(nhg); + + vector fieldValues; + nexthop_group_table.get("4", fieldValues); + + EXPECT_EQ(fieldValues.size(), 2); + EXPECT_EQ(fvField(fieldValues[0]), "nexthop"); + EXPECT_EQ(fvValue(fieldValues[0]), "0.0.0.0"); + EXPECT_EQ(fvField(fieldValues[1]), "ifname"); + EXPECT_EQ(fvValue(fieldValues[1]), "Ethernet0"); + } + + // Empty interface name + { + NextHopGroup nhg(5, test_gateway, ""); + m_mockRouteSync.updateNextHopGroupDb(nhg); + + vector fieldValues; + nexthop_group_table.get("5", fieldValues); + + EXPECT_EQ(fieldValues.size(), 2); + EXPECT_EQ(fvField(fieldValues[0]), "nexthop"); + EXPECT_EQ(fvValue(fieldValues[0]), test_gateway); + EXPECT_EQ(fvField(fieldValues[1]), "ifname"); + EXPECT_EQ(fvValue(fieldValues[1]), ""); + } +} + +TEST_F(FpmSyncdResponseTest, TestDeleteNextHopGroup) +{ + // Setup test groups + NextHopGroup nhg1(1, test_gateway, "Ethernet0"); + NextHopGroup nhg2(2, test_gateway_, "Ethernet1"); + nhg1.installed = true; + nhg2.installed = true; + + m_mockRouteSync.m_nh_groups.insert({1, nhg1}); + m_mockRouteSync.m_nh_groups.insert({2, nhg2}); + + // Test deletion + m_mockRouteSync.deleteNextHopGroup(1); + EXPECT_EQ(m_mockRouteSync.m_nh_groups.find(1), m_mockRouteSync.m_nh_groups.end()); + EXPECT_NE(m_mockRouteSync.m_nh_groups.find(2), m_mockRouteSync.m_nh_groups.end()); + + // Test deleting non-existent group + m_mockRouteSync.deleteNextHopGroup(999); + EXPECT_EQ(m_mockRouteSync.m_nh_groups.find(999), m_mockRouteSync.m_nh_groups.end()); +} + +struct nlmsghdr* createNewNextHopMsgHdr(const vector>& group_members, uint32_t id) { + struct nlmsghdr *nlh = (struct nlmsghdr *)malloc(NLMSG_SPACE(MAX_PAYLOAD)); + memset(nlh, 0, NLMSG_SPACE(MAX_PAYLOAD)); + + // Set header + nlh->nlmsg_type = RTM_NEWNEXTHOP; + nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_REPLACE; + nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nhmsg)); + + // Set nhmsg + struct nhmsg *nhm = (struct nhmsg *)NLMSG_DATA(nlh); + nhm->nh_family = AF_INET; + + // Add NHA_ID + struct rtattr *rta = (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len)); + rta->rta_type = NHA_ID; + rta->rta_len = RTA_LENGTH(sizeof(uint32_t)); + *(uint32_t *)RTA_DATA(rta) = id; + nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len); + + // Add NHA_GROUP + rta = (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len)); + rta->rta_type = NHA_GROUP; + struct nexthop_grp* grp = (struct nexthop_grp*)malloc(group_members.size() * sizeof(struct nexthop_grp)); + + for (size_t i = 0; i < group_members.size(); i++) { + grp[i].id = group_members[i].first; + grp[i].weight = group_members[i].second - 1; // kernel stores weight-1 + } + + size_t payload_size = group_members.size() * sizeof(struct nexthop_grp); + if (payload_size > USHRT_MAX - RTA_LENGTH(0)) { + free(nlh); + return nullptr; + } + + rta->rta_len = static_cast(RTA_LENGTH(group_members.size() * sizeof(struct nexthop_grp))); + memcpy(RTA_DATA(rta), grp, group_members.size() * sizeof(struct nexthop_grp)); + nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len); + + free(grp); + return nlh; +} + +TEST_F(FpmSyncdResponseTest, TestNextHopGroupAdd) +{ + // 1. 
create nexthops + uint32_t nh1_id = 1; + uint32_t nh2_id = 2; + uint32_t nh3_id = 3; + + struct nlmsghdr* nlh1 = createNewNextHopMsgHdr(1, test_gateway, nh1_id); + struct nlmsghdr* nlh2 = createNewNextHopMsgHdr(2, test_gateway_, nh2_id); + struct nlmsghdr* nlh3 = createNewNextHopMsgHdr(3, test_gateway__, nh3_id); + + EXPECT_CALL(m_mockRouteSync, getIfName(1, _, _)) + .WillOnce(DoAll( + [](int32_t, char* ifname, size_t size) { + strncpy(ifname, "Ethernet1", size); + ifname[size-1] = '\0'; + }, + Return(true) + )); + + EXPECT_CALL(m_mockRouteSync, getIfName(2, _, _)) + .WillOnce(DoAll( + [](int32_t, char* ifname, size_t size) { + strncpy(ifname, "Ethernet2", size); + ifname[size-1] = '\0'; + }, + Return(true) + )); + + EXPECT_CALL(m_mockRouteSync, getIfName(3, _, _)) + .WillOnce(DoAll( + [](int32_t, char* ifname, size_t size) { + strncpy(ifname, "Ethernet3", size); + ifname[size-1] = '\0'; + }, + Return(true) + )); + + m_mockRouteSync.onNextHopMsg(nlh1, (int)(nlh1->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg)))); + m_mockRouteSync.onNextHopMsg(nlh2, (int)(nlh2->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg)))); + m_mockRouteSync.onNextHopMsg(nlh3, (int)(nlh3->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg)))); + + // 2. create a nexthop group with these nexthops + uint32_t group_id = 10; + vector> group_members = { + {nh1_id, 1}, // id=1, weight=1 + {nh2_id, 2}, // id=2, weight=2 + {nh3_id, 3} // id=3, weight=3 + }; + + struct nlmsghdr* group_nlh = createNewNextHopMsgHdr(group_members, group_id); + ASSERT_NE(group_nlh, nullptr) << "Failed to create group nexthop message"; + m_mockRouteSync.onNextHopMsg(group_nlh, (int)(group_nlh->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg)))); + + // Verify the group was added correctly + auto it = m_mockRouteSync.m_nh_groups.find(group_id); + ASSERT_NE(it, m_mockRouteSync.m_nh_groups.end()) << "Failed to add nexthop group"; + + // Verify group members + const auto& group = it->second.group; + ASSERT_EQ(group.size(), 3) << "Wrong number of group members"; + + // Check each member's ID and weight + EXPECT_EQ(group[0].first, nh1_id); + EXPECT_EQ(group[0].second, 1); + EXPECT_EQ(group[1].first, nh2_id); + EXPECT_EQ(group[1].second, 2); + EXPECT_EQ(group[2].first, nh3_id); + EXPECT_EQ(group[2].second, 3); + + // Mark the group as installed and verify DB update + m_mockRouteSync.installNextHopGroup(group_id); + + Table nexthop_group_table(m_db.get(), APP_NEXTHOP_GROUP_TABLE_NAME); + vector fieldValues; + string key = to_string(group_id); + nexthop_group_table.get(key, fieldValues); + + ASSERT_EQ(fieldValues.size(), 3) << "Wrong number of fields in DB"; + + // Verify the DB fields + string nexthops, ifnames, weights; + for (const auto& fv : fieldValues) { + if (fvField(fv) == "nexthop") { + nexthops = fvValue(fv); + } else if (fvField(fv) == "ifname") { + ifnames = fvValue(fv); + } else if (fvField(fv) == "weight") { + weights = fvValue(fv); + } + } + + EXPECT_EQ(nexthops, "192.168.1.1,192.168.1.2,192.168.1.3"); + EXPECT_EQ(ifnames, "Ethernet1,Ethernet2,Ethernet3"); + EXPECT_EQ(weights, "1,2,3"); + + // Cleanup + free(nlh1); + free(nlh2); + free(nlh3); + free(group_nlh); +} + +TEST_F(FpmSyncdResponseTest, TestRouteMsgWithNHG) +{ + Table route_table(m_db.get(), APP_ROUTE_TABLE_NAME); + auto createRoute = [](const char* prefix, uint8_t prefixlen) -> rtnl_route* { + rtnl_route* route = rtnl_route_alloc(); + nl_addr* dst_addr; + nl_addr_parse(prefix, AF_INET, &dst_addr); + rtnl_route_set_dst(route, dst_addr); + rtnl_route_set_type(route, RTN_UNICAST); + 
rtnl_route_set_protocol(route, RTPROT_STATIC); + rtnl_route_set_family(route, AF_INET); + rtnl_route_set_scope(route, RT_SCOPE_UNIVERSE); + rtnl_route_set_table(route, RT_TABLE_MAIN); + nl_addr_put(dst_addr); + return route; + }; + + uint32_t test_nh_id = 1; + uint32_t test_nhg_id = 2; + uint32_t test_nh_id_ = 3; + uint32_t test_nh_id__ = 4; + + // create a route + const char* test_destipprefix = "10.1.1.0"; + rtnl_route* test_route = createRoute(test_destipprefix, 24); + + // Test 1: use a non-existent nh_id + { + rtnl_route_set_nh_id(test_route, test_nh_id); + + m_mockRouteSync.onRouteMsg(RTM_NEWROUTE, (nl_object*)test_route, nullptr); + + vector keys; + route_table.getKeys(keys); + + // verify the route is discarded + EXPECT_TRUE(std::find(keys.begin(), keys.end(), test_destipprefix) == keys.end()); + } + + // Test 2: using a nexthop + { + // create the nexthop + struct nlmsghdr* nlh = createNewNextHopMsgHdr(1, test_gateway, test_nh_id); + + EXPECT_CALL(m_mockRouteSync, getIfName(1, _, _)) + .WillOnce(DoAll( + [](int32_t, char* ifname, size_t size) { + strncpy(ifname, "Ethernet1", size); + ifname[size-1] = '\0'; + }, + Return(true) + )); + + m_mockRouteSync.onNextHopMsg(nlh, (int)(nlh->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg)))); + + free(nlh); + + rtnl_route_set_nh_id(test_route, test_nh_id); + + m_mockRouteSync.onRouteMsg(RTM_NEWROUTE, (nl_object*)test_route, nullptr); + + vector fvs; + EXPECT_TRUE(route_table.get(test_destipprefix, fvs)); + EXPECT_EQ(fvs.size(), 3); + for (const auto& fv : fvs) { + if (fvField(fv) == "nexthop") { + EXPECT_EQ(fvValue(fv), test_gateway); + } else if (fvField(fv) == "ifname") { + EXPECT_EQ(fvValue(fv), "Ethernet1"); + } else if (fvField(fv) == "protocol") { + EXPECT_EQ(fvValue(fv), "static"); + } + } + } + + // Test 3: using an nhg + { + struct nlmsghdr* nlh1 = createNewNextHopMsgHdr(2, test_gateway_, test_nh_id_); + struct nlmsghdr* nlh2 = createNewNextHopMsgHdr(3, test_gateway__, test_nh_id__); + + EXPECT_CALL(m_mockRouteSync, getIfName(2, _, _)) + .WillOnce(DoAll( + [](int32_t, char* ifname, size_t size) { + strncpy(ifname, "Ethernet2", size); + ifname[size-1] = '\0'; + }, + Return(true) + )); + + EXPECT_CALL(m_mockRouteSync, getIfName(3, _, _)) + .WillOnce(DoAll( + [](int32_t, char* ifname, size_t size) { + strncpy(ifname, "Ethernet3", size); + ifname[size-1] = '\0'; + }, + Return(true) + )); + + m_mockRouteSync.onNextHopMsg(nlh1, (int)(nlh1->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg)))); + m_mockRouteSync.onNextHopMsg(nlh2, (int)(nlh2->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg)))); + + vector> group_members = { + {test_nh_id_, 1}, + {test_nh_id__, 2} + }; + + struct nlmsghdr* group_nlh = createNewNextHopMsgHdr(group_members, test_nhg_id); + m_mockRouteSync.onNextHopMsg(group_nlh, (int)(group_nlh->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg)))); + + // create the route object referring to this next hop group + rtnl_route_set_nh_id(test_route, test_nhg_id); + m_mockRouteSync.onRouteMsg(RTM_NEWROUTE, (nl_object*)test_route, nullptr); + + vector fvs; + EXPECT_TRUE(route_table.get(test_destipprefix, fvs)); + + for (const auto& fv : fvs) { + if (fvField(fv) == "nexthop_group") { + EXPECT_EQ(fvValue(fv), "2"); + } else if (fvField(fv) == "protocol") { + EXPECT_EQ(fvValue(fv), "static"); + } + } + + vector group_fvs; + Table nexthop_group_table(m_db.get(), APP_NEXTHOP_GROUP_TABLE_NAME); + EXPECT_TRUE(nexthop_group_table.get("2", group_fvs)); + + // clean up + free(nlh1); + free(nlh2); + free(group_nlh); + } + rtnl_route_put(test_route); } diff 
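The group tests above lean on one easy-to-miss kernel convention: struct nexthop_grp carries each member's weight minus one, which is why createNewNextHopMsgHdr stores second - 1 and onNextHopMsg is expected to add the one back before caching the group. Below is a minimal, self-contained sketch of that round trip; it assumes only linux/nexthop.h, and the Member type plus the encode/decode helper names are illustrative, not part of this patch.

#include <linux/nexthop.h>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// User-visible view of a group member: weights start at 1.
struct Member { uint32_t id; uint32_t weight; };

// Encode members the way the kernel expects them inside NHA_GROUP.
static std::vector<nexthop_grp> encode(const std::vector<Member> &in)
{
    std::vector<nexthop_grp> out(in.size());
    for (size_t i = 0; i < in.size(); i++)
    {
        out[i] = {};                                 // zero the reserved fields
        out[i].id = in[i].id;
        out[i].weight = (uint8_t)(in[i].weight - 1); // kernel stores weight-1
    }
    return out;
}

// Decode back to the user-visible weight, as the tests expect onNextHopMsg to do.
static std::vector<Member> decode(const std::vector<nexthop_grp> &in)
{
    std::vector<Member> out(in.size());
    for (size_t i = 0; i < in.size(); i++)
    {
        out[i].id = in[i].id;
        out[i].weight = in[i].weight + 1u; // restore the original weight
    }
    return out;
}

int main()
{
    auto wire = encode({{1, 1}, {2, 2}});
    assert(wire[0].weight == 0 && wire[1].weight == 1); // weights travel as w-1
    auto back = decode(wire);
    assert(back[0].weight == 1 && back[1].weight == 2);
    return 0;
}

Running it is a quick sanity check of the weights asserted above: 1,2,3 in the test become 0,1,2 on the wire and 1,2,3 again after decoding.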
--git a/tests/mock_tests/intfsorch_ut.cpp b/tests/mock_tests/intfsorch_ut.cpp index c2d65362ae..b7c2bc7397 100644 --- a/tests/mock_tests/intfsorch_ut.cpp +++ b/tests/mock_tests/intfsorch_ut.cpp @@ -330,5 +330,63 @@ namespace intfsorch_test static_cast(gIntfsOrch)->doTask(); ASSERT_EQ(current_create_count + 1, create_rif_count); ASSERT_EQ(current_remove_count + 1, remove_rif_count); + }; + + TEST_F(IntfsOrchTest, IntfsOrchVrfUpdate) + { + //create a new vrf + std::deque entries; + entries.push_back({"Vrf-Blue", "SET", { {"NULL", "NULL"}}}); + auto consumer = dynamic_cast(gVrfOrch->getExecutor(APP_VRF_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gVrfOrch)->doTask(); + ASSERT_TRUE(gVrfOrch->isVRFexists("Vrf-Blue")); + auto new_vrf_reference_count = gVrfOrch->getVrfRefCount("Vrf-Blue"); + ASSERT_EQ(new_vrf_reference_count, 0); + + // create an interface + entries.clear(); + entries.push_back({"Loopback2", "SET", {}}); + consumer = dynamic_cast(gIntfsOrch->getExecutor(APP_INTF_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gIntfsOrch)->doTask(); + IntfsTable m_syncdIntfses = gIntfsOrch->getSyncdIntfses(); + ASSERT_EQ(m_syncdIntfses["Loopback2"].vrf_id, gVirtualRouterId); + + // change vrf and check if it worked + entries.clear(); + entries.push_back({"Loopback2", "SET", { {"vrf_name", "Vrf-Blue"}}}); + consumer = dynamic_cast(gIntfsOrch->getExecutor(APP_INTF_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gIntfsOrch)->doTask(); + auto new_vrf_updated_reference_count = gVrfOrch->getVrfRefCount("Vrf-Blue"); + ASSERT_EQ(new_vrf_reference_count + 1, new_vrf_updated_reference_count); + m_syncdIntfses = gIntfsOrch->getSyncdIntfses(); + ASSERT_EQ(m_syncdIntfses["Loopback2"].vrf_id, gVrfOrch->getVRFid("Vrf-Blue")); + + // create an interface + entries.clear(); + entries.push_back({"Loopback3", "SET", {}}); + consumer = dynamic_cast(gIntfsOrch->getExecutor(APP_INTF_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gIntfsOrch)->doTask(); + m_syncdIntfses = gIntfsOrch->getSyncdIntfses(); + ASSERT_EQ(m_syncdIntfses["Loopback3"].vrf_id, gVirtualRouterId); + + // Add IP address to the interface + entries.clear(); + entries.push_back({"Loopback3:3.3.3.3/32", "SET", {{"scope", "global"},{"family", "IPv4"}}}); + consumer = dynamic_cast(gIntfsOrch->getExecutor(APP_INTF_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gIntfsOrch)->doTask(); + + // change vrf and check it doesn't affect the interface due to existing IP + entries.clear(); + entries.push_back({"Loopback3", "SET", { {"vrf_name", "Vrf-Blue"}}}); + consumer = dynamic_cast(gIntfsOrch->getExecutor(APP_INTF_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gIntfsOrch)->doTask(); + m_syncdIntfses = gIntfsOrch->getSyncdIntfses(); + ASSERT_EQ(m_syncdIntfses["Loopback3"].vrf_id, gVirtualRouterId); } } \ No newline at end of file diff --git a/tests/mock_tests/mock_sai_api.h b/tests/mock_tests/mock_sai_api.h index aecb96a1af..06c347283f 100644 --- a/tests/mock_tests/mock_sai_api.h +++ b/tests/mock_tests/mock_sai_api.h @@ -11,6 +11,7 @@ To mock a particular SAI API: 3. In the SetUp method of the test class, call INIT_SAI_API_MOCK for each SAI API you want to mock. 4. In the SetUp method of the test class, call MockSaiApis. 5. In the TearDown method of the test class, call RestoreSaiApis. +6. 
After RestoreSaiApis, call DEINIT_SAI_API_MOCK */ using ::testing::Return; @@ -242,6 +243,14 @@ The macro DEFINE_SAI_API_MOCK will perform the steps to mock the SAI API for the apply_mock_fns.insert(&apply_sai_##sai_object_type##_api_mock); \ remove_mock_fns.insert(&remove_sai_##sai_object_type##_api_mock); +/* + Call this after RestoreSaiApis to clear the mock_fns + Required when same SAI_API is being mocked in multiple files eg: acl API in multiple tests +*/ +#define DEINIT_SAI_API_MOCK(sai_object_type) \ + apply_mock_fns.erase(&apply_sai_##sai_object_type##_api_mock); \ + remove_mock_fns.erase(&remove_sai_##sai_object_type##_api_mock); + void MockSaiApis(); void RestoreSaiApis(); #endif \ No newline at end of file diff --git a/tests/mock_tests/mux_rollback_ut.cpp b/tests/mock_tests/mux_rollback_ut.cpp index d228475546..06349fed51 100644 --- a/tests/mock_tests/mux_rollback_ut.cpp +++ b/tests/mock_tests/mux_rollback_ut.cpp @@ -162,6 +162,10 @@ namespace mux_rollback_test void PreTearDown() override { RestoreSaiApis(); + DEINIT_SAI_API_MOCK(next_hop); + DEINIT_SAI_API_MOCK(acl); + DEINIT_SAI_API_MOCK(route); + DEINIT_SAI_API_MOCK(neighbor); gNeighOrch->gNeighBulker.create_entries = old_create_neighbor_entries; gNeighOrch->gNeighBulker.remove_entries = old_remove_neighbor_entries; gNeighOrch->gNextHopBulker.create_entries = old_object_create; diff --git a/tests/mock_tests/portsorch_ut.cpp b/tests/mock_tests/portsorch_ut.cpp index 0d698b8451..2dfee93b2e 100644 --- a/tests/mock_tests/portsorch_ut.cpp +++ b/tests/mock_tests/portsorch_ut.cpp @@ -98,6 +98,8 @@ namespace portsorch_test uint32_t _sai_set_link_event_damping_algorithm_count; uint32_t _sai_set_link_event_damping_config_count; int32_t _sai_link_event_damping_algorithm = 0; + bool set_pfc_asym_not_supported = false; + uint32_t set_pfc_asym_failures; sai_redis_link_event_damping_algo_aied_config_t _sai_link_event_damping_config = {0, 0, 0, 0, 0}; sai_status_t _ut_stub_sai_set_port_attribute( @@ -114,9 +116,15 @@ namespace portsorch_test /* Simulating failure case */ return SAI_STATUS_FAILURE; } - else if (attr[0].id == SAI_PORT_PRIORITY_FLOW_CONTROL_MODE_COMBINED) - { - _sai_set_pfc_mode_count++; + else if (attr[0].id == SAI_PORT_ATTR_PRIORITY_FLOW_CONTROL_MODE) + { + _sai_set_pfc_mode_count++; + /* Simulating failure case */ + if (set_pfc_asym_not_supported) + { + set_pfc_asym_failures++; + return SAI_STATUS_NOT_SUPPORTED; + } } else if (attr[0].id == SAI_PORT_ATTR_ADMIN_STATE) { @@ -2430,6 +2438,59 @@ namespace portsorch_test mock_port_fec_modes = old_mock_port_fec_modes; _unhook_sai_port_api(); } + + /* + * Test case: SAI_PORT_ATTR_PRIORITY_FLOW_CONTROL_MODE is not supported by vendor + **/ + TEST_F(PortsOrchTest, PortPFCNotSupported) + { + _hook_sai_port_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; + + set_pfc_asym_not_supported = true; + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + + uint32_t current_sai_api_call_count = _sai_set_pfc_mode_count; + + entries.push_back({"Ethernet0", "SET", + { + { "pfc_asym", "off"} + }}); + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + 
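+        // Replay the pfc_asym update; even though SAI reports NOT_SUPPORTED,
+        // the orch is expected to consume the task instead of retrying it forever.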
consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + entries.clear(); + + ASSERT_EQ(_sai_set_pfc_mode_count, ++current_sai_api_call_count); + ASSERT_EQ(set_pfc_asym_failures, 1); + + set_pfc_asym_not_supported = false; + + vector ts; + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + _unhook_sai_port_api(); + } + TEST_F(PortsOrchTest, PortTestSAIFailureHandling) { _hook_sai_port_api(); diff --git a/tests/mock_tests/portsyncd/portsyncd_ut.cpp b/tests/mock_tests/portsyncd/portsyncd_ut.cpp index f97a80e3d6..93575f68bc 100644 --- a/tests/mock_tests/portsyncd/portsyncd_ut.cpp +++ b/tests/mock_tests/portsyncd/portsyncd_ut.cpp @@ -187,18 +187,6 @@ namespace portsyncd_ut namespace portsyncd_ut { - TEST_F(PortSyncdTest, test_linkSyncInit) - { - if_ni_mock = populateNetDev(); - mockCmdStdcout = "up\n"; - swss::LinkSync sync(m_app_db.get(), m_state_db.get()); - std::vector keys; - sync.m_stateMgmtPortTable.getKeys(keys); - ASSERT_EQ(keys.size(), 1); - ASSERT_EQ(keys.back(), "eth0"); - ASSERT_EQ(mockCallArgs.back(), "cat /sys/class/net/\"eth0\"/operstate"); - } - TEST_F(PortSyncdTest, test_cacheOldIfaces) { if_ni_mock = populateNetDevAdvanced(); @@ -295,29 +283,6 @@ namespace portsyncd_ut ASSERT_EQ(sync.m_statePortTable.get("Ethernet0", ovalues), false); } - TEST_F(PortSyncdTest, test_onMsgMgmtIface){ - swss::LinkSync sync(m_app_db.get(), m_state_db.get()); - - /* Generate a netlink notification about the eth0 netdev iface */ - std::vector flags = {IFF_UP}; - struct nl_object* msg = draft_nlmsg("eth0", - flags, - "", - "00:50:56:28:0e:4a", - 16222, - 9100, - 0); - sync.onMsg(RTM_NEWLINK, msg); - - /* Verify if the update has been written to State DB */ - std::string oper_status; - ASSERT_EQ(sync.m_stateMgmtPortTable.hget("eth0", "oper_status", oper_status), true); - ASSERT_EQ(oper_status, "down"); - - /* Free Nl_object */ - free_nlobj(msg); - } - TEST_F(PortSyncdTest, test_onMsgIgnoreOldNetDev){ if_ni_mock = populateNetDevAdvanced(); swss::LinkSync sync(m_app_db.get(), m_state_db.get()); diff --git a/tests/mock_tests/routeorch_ut.cpp b/tests/mock_tests/routeorch_ut.cpp index fe24cf1f29..f66fc9ac38 100644 --- a/tests/mock_tests/routeorch_ut.cpp +++ b/tests/mock_tests/routeorch_ut.cpp @@ -281,6 +281,7 @@ namespace routeorch_test for (const auto &it : ports) { portTable.set(it.first, it.second); + portTable.set(it.first, {{ "oper_status", "up" }}); } // Set PortConfigDone @@ -305,6 +306,11 @@ namespace routeorch_test {"mac_addr", "00:00:00:00:00:00" }}); intfTable.set("Ethernet4:11.0.0.1/32", { { "scope", "global" }, { "family", "IPv4" }}); + intfTable.set("Ethernet8", { {"NULL", "NULL" }, + {"vrf_name", "Vrf1"}, + {"mac_addr", "00:00:00:00:00:00" }}); + intfTable.set("Ethernet8:20.0.0.1/24", { { "scope", "global" }, + { "family", "IPv4" }}); gIntfsOrch->addExistingData(&intfTable); static_cast(gIntfsOrch)->doTask(); @@ -551,4 +557,30 @@ namespace routeorch_test ASSERT_EQ(current_create_count, create_route_count); ASSERT_EQ(current_set_count, set_route_count); } + + TEST_F(RouteOrchTest, RouteOrchTestVrfRoute) + { + std::deque entries; + entries.push_back({"Vrf2", "SET", { {"vni", "500200"}}}); + auto vrfConsumer = dynamic_cast(gVrfOrch->getExecutor(APP_VRF_TABLE_NAME)); + vrfConsumer->addToSync(entries); + static_cast(gVrfOrch)->doTask(); + entries.clear(); + entries.push_back({"Ethernet8", "SET", { {"vrf_name", "Vrf2"}}}); + auto intfConsumer = dynamic_cast(gIntfsOrch->getExecutor(APP_INTF_TABLE_NAME)); + intfConsumer->addToSync(entries); + 
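+        // Bind Ethernet8 to Vrf2 before replaying the kernel routes below, so
+        // they resolve against the VRF rather than the default routing instance.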
static_cast(gIntfsOrch)->doTask(); + auto routeConsumer = dynamic_cast(gRouteOrch->getExecutor(APP_ROUTE_TABLE_NAME)); + entries.clear(); + entries.push_back({"Vrf2:fe80::/64", "DEL", {}}); + entries.push_back({"Vrf2:20.0.0.0/24", "DEL", {}}); + entries.push_back({"Vrf2:fe80::/64", "SET", { {"protocol", "kernel"}, + {"nexthop", "::"}, + {"ifname", "Ethernet8"}}}); + entries.push_back({"Vrf2:20.0.0.0/24", "SET", { {"protocol", "kernel"}, + {"nexthop", "0.0.0.0"}, + {"ifname", "Ethernet8"}}}); + routeConsumer->addToSync(entries); + static_cast(gRouteOrch)->doTask(); + } } diff --git a/tests/mock_tests/switchorch_ut.cpp b/tests/mock_tests/switchorch_ut.cpp index a1e8b218e4..e46fb9d07b 100644 --- a/tests/mock_tests/switchorch_ut.cpp +++ b/tests/mock_tests/switchorch_ut.cpp @@ -124,6 +124,44 @@ namespace switchorch_test gSwitchOrch = new SwitchOrch(m_app_db.get(), switch_tables, stateDbSwitchTable); } + void checkAsicSdkHealthEvent(const sai_timespec_t ×tamp, const string &expected_key="") + { + initSwitchOrch(); + + sai_switch_health_data_t data; + memset(&data, 0, sizeof(data)); + data.data_type = SAI_HEALTH_DATA_TYPE_GENERAL; + vector data_from_sai({100, 101, 115, 99, 114, 105, 112, 116, 105, 245, 111, 110, 245, 10, 123, 125, 100, 100}); + sai_u8_list_t description; + description.list = data_from_sai.data(); + description.count = (uint32_t)(data_from_sai.size() - 2); + on_switch_asic_sdk_health_event(gSwitchId, + SAI_SWITCH_ASIC_SDK_HEALTH_SEVERITY_FATAL, + timestamp, + SAI_SWITCH_ASIC_SDK_HEALTH_CATEGORY_FW, + data, + description); + + string key; + if (expected_key.empty()) + { + vector keys; + gSwitchOrch->m_asicSdkHealthEventTable->getKeys(keys); + key = keys[0]; + } + else + { + key = expected_key; + } + string value; + gSwitchOrch->m_asicSdkHealthEventTable->hget(key, "category", value); + ASSERT_EQ(value, "firmware"); + gSwitchOrch->m_asicSdkHealthEventTable->hget(key, "severity", value); + ASSERT_EQ(value, "fatal"); + gSwitchOrch->m_asicSdkHealthEventTable->hget(key, "description", value); + ASSERT_EQ(value, "description\n{}"); + } + void TearDown() override { ::testing_db::reset(); @@ -289,30 +327,13 @@ namespace switchorch_test TEST_F(SwitchOrchTest, SwitchOrchTestHandleEvent) { - initSwitchOrch(); - sai_timespec_t timestamp = {.tv_sec = 1701160447, .tv_nsec = 538710245}; - sai_switch_health_data_t data; - memset(&data, 0, sizeof(data)); - data.data_type = SAI_HEALTH_DATA_TYPE_GENERAL; - vector data_from_sai({100, 101, 115, 99, 114, 105, 112, 116, 105, 245, 111, 110, 245, 10, 123, 125, 100, 100}); - sai_u8_list_t description; - description.list = data_from_sai.data(); - description.count = (uint32_t)(data_from_sai.size() - 2); - on_switch_asic_sdk_health_event(gSwitchId, - SAI_SWITCH_ASIC_SDK_HEALTH_SEVERITY_FATAL, - timestamp, - SAI_SWITCH_ASIC_SDK_HEALTH_CATEGORY_FW, - data, - description); - - string key = "2023-11-28 08:34:07"; - string value; - gSwitchOrch->m_asicSdkHealthEventTable->hget(key, "category", value); - ASSERT_EQ(value, "firmware"); - gSwitchOrch->m_asicSdkHealthEventTable->hget(key, "severity", value); - ASSERT_EQ(value, "fatal"); - gSwitchOrch->m_asicSdkHealthEventTable->hget(key, "description", value); - ASSERT_EQ(value, "description\n{}"); + checkAsicSdkHealthEvent(timestamp, "2023-11-28 08:34:07"); + } + + TEST_F(SwitchOrchTest, SwitchOrchTestHandleEventInvalidTimeStamp) + { + sai_timespec_t timestamp = {.tv_sec = 172479515853275099, .tv_nsec = 538710245}; + checkAsicSdkHealthEvent(timestamp); } } diff --git a/tests/mock_tests/teammgrd/teammgr_ut.cpp 
b/tests/mock_tests/teammgrd/teammgr_ut.cpp index 32f064f552..a40f39f484 100644 --- a/tests/mock_tests/teammgrd/teammgr_ut.cpp +++ b/tests/mock_tests/teammgrd/teammgr_ut.cpp @@ -1,22 +1,128 @@ #include "gtest/gtest.h" #include "../mock_table.h" #include "teammgr.h" +#include extern int (*callback)(const std::string &cmd, std::string &stdout); extern std::vector mockCallArgs; +static std::vector< std::pair > mockKillCommands; +static std::map pidFiles; + +static int (*callback_kill)(pid_t pid, int sig) = NULL; +static std::pair (*callback_fopen)(const char *pathname, const char *mode) = NULL; + +static int cb_kill(pid_t pid, int sig) +{ + mockKillCommands.push_back(std::make_pair(pid, sig)); + if (!sig) + { + errno = ESRCH; + return -1; + } + else + { + return 0; + } +} + +int kill(pid_t pid, int sig) +{ + if (callback_kill) + { + return callback_kill(pid, sig); + } + int (*realfunc)(pid_t, int) = + (int(*)(pid_t, int))(dlsym (RTLD_NEXT, "kill")); + return realfunc(pid, sig); +} + +static std::pair cb_fopen(const char *pathname, const char *mode) +{ + auto pidFileSearch = pidFiles.find(pathname); + if (pidFileSearch != pidFiles.end()) + { + if (!pidFileSearch->second) + { + errno = ENOENT; + } + return std::make_pair(true, pidFileSearch->second); + } + else + { + return std::make_pair(false, (FILE*)NULL); + } +} + +FILE* fopen(const char *pathname, const char *mode) +{ + if (callback_fopen) + { + std::pair callback_fd = callback_fopen(pathname, mode); + if (callback_fd.first) + { + return callback_fd.second; + } + } + FILE* (*realfunc)(const char *, const char *) = + (FILE* (*)(const char *, const char *))(dlsym (RTLD_NEXT, "fopen")); + return realfunc(pathname, mode); +} + +FILE* fopen64(const char *pathname, const char *mode) +{ + if (callback_fopen) + { + std::pair callback_fd = callback_fopen(pathname, mode); + if (callback_fd.first) + { + return callback_fd.second; + } + } + FILE* (*realfunc)(const char *, const char *) = + (FILE* (*)(const char *, const char *))(dlsym (RTLD_NEXT, "fopen64")); + return realfunc(pathname, mode); +} int cb(const std::string &cmd, std::string &stdout) { mockCallArgs.push_back(cmd); - if (cmd.find("/usr/bin/teamd -r -t PortChannel1") != std::string::npos) + if (cmd.find("/usr/bin/teamd -r -t PortChannel382") != std::string::npos) { + mkdir("/var/run/teamd", 0755); + std::FILE* pidFile = std::tmpfile(); + std::fputs("1234", pidFile); + std::rewind(pidFile); + pidFiles["/var/run/teamd/PortChannel382.pid"] = pidFile; return 1; } - else if (cmd.find("cat \"/var/run/teamd/PortChannel1.pid\"") != std::string::npos) + else if (cmd.find("/usr/bin/teamd -r -t PortChannel812") != std::string::npos) { - stdout = "1234"; + pidFiles["/var/run/teamd/PortChannel812.pid"] = NULL; + return 1; + } + else if (cmd.find("/usr/bin/teamd -r -t PortChannel495") != std::string::npos) + { + mkdir("/var/run/teamd", 0755); + std::FILE* pidFile = std::tmpfile(); + std::fputs("5678", pidFile); + std::rewind(pidFile); + pidFiles["/var/run/teamd/PortChannel495.pid"] = pidFile; return 0; } + else if (cmd.find("/usr/bin/teamd -r -t PortChannel198") != std::string::npos) + { + pidFiles["/var/run/teamd/PortChannel198.pid"] = NULL; + } + else + { + for (int i = 600; i < 620; i++) + { + if (cmd.find(std::string("/usr/bin/teamd -r -t PortChannel") + std::to_string(i)) != std::string::npos) + { + pidFiles[std::string("/var/run/teamd/PortChannel") + std::to_string(i) + std::string(".pid")] = NULL; + } + } + } return 0; } @@ -53,7 +159,18 @@ namespace teammgr_ut cfg_lag_tables = tables; 
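+            // Start every test from a clean slate: drop recorded calls and
+            // (re)install the exec/kill/fopen interposers defined above.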
mockCallArgs.clear(); + mockKillCommands.clear(); + pidFiles.clear(); callback = cb; + callback_kill = cb_kill; + callback_fopen = cb_fopen; + } + + virtual void TearDown() override + { + callback = NULL; + callback_kill = NULL; + callback_fopen = NULL; } }; @@ -61,18 +178,90 @@ namespace teammgr_ut { swss::TeamMgr teammgr(m_config_db.get(), m_app_db.get(), m_state_db.get(), cfg_lag_tables); swss::Table cfg_lag_table = swss::Table(m_config_db.get(), CFG_LAG_TABLE_NAME); - cfg_lag_table.set("PortChannel1", { { "admin_status", "up" }, + cfg_lag_table.set("PortChannel382", { { "admin_status", "up" }, { "mtu", "9100" }, { "lacp_key", "auto" }, { "min_links", "2" } }); teammgr.addExistingData(&cfg_lag_table); teammgr.doTask(); - int kill_cmd_called = 0; - for (auto cmd : mockCallArgs){ - if (cmd.find("kill -TERM 1234") != std::string::npos){ - kill_cmd_called++; - } + ASSERT_NE(mockCallArgs.size(), 0); + EXPECT_NE(mockCallArgs.front().find("/usr/bin/teamd -r -t PortChannel382"), std::string::npos); + EXPECT_EQ(mockCallArgs.size(), 1); + EXPECT_EQ(mockKillCommands.size(), 1); + EXPECT_EQ(mockKillCommands.front().first, 1234); + EXPECT_EQ(mockKillCommands.front().second, SIGTERM); + } + + TEST_F(TeamMgrTest, testProcessPidFileMissingAfterAddLagFailure) + { + swss::TeamMgr teammgr(m_config_db.get(), m_app_db.get(), m_state_db.get(), cfg_lag_tables); + swss::Table cfg_lag_table = swss::Table(m_config_db.get(), CFG_LAG_TABLE_NAME); + cfg_lag_table.set("PortChannel812", { { "admin_status", "up" }, + { "mtu", "9100" }, + { "fallback", "true" }, + { "lacp_key", "auto" }, + { "min_links", "1" } }); + teammgr.addExistingData(&cfg_lag_table); + teammgr.doTask(); + ASSERT_NE(mockCallArgs.size(), 0); + EXPECT_NE(mockCallArgs.front().find("/usr/bin/teamd -r -t PortChannel812"), std::string::npos); + EXPECT_EQ(mockCallArgs.size(), 1); + EXPECT_EQ(mockKillCommands.size(), 0); + } + + TEST_F(TeamMgrTest, testProcessCleanupAfterAddLag) + { + swss::TeamMgr teammgr(m_config_db.get(), m_app_db.get(), m_state_db.get(), cfg_lag_tables); + swss::Table cfg_lag_table = swss::Table(m_config_db.get(), CFG_LAG_TABLE_NAME); + cfg_lag_table.set("PortChannel495", { { "admin_status", "up" }, + { "mtu", "9100" }, + { "lacp_key", "auto" }, + { "min_links", "2" } }); + teammgr.addExistingData(&cfg_lag_table); + teammgr.doTask(); + ASSERT_EQ(mockCallArgs.size(), 3); + ASSERT_NE(mockCallArgs.front().find("/usr/bin/teamd -r -t PortChannel495"), std::string::npos); + teammgr.cleanTeamProcesses(); + EXPECT_EQ(mockKillCommands.size(), 2); + EXPECT_EQ(mockKillCommands.front().first, 5678); + EXPECT_EQ(mockKillCommands.front().second, SIGTERM); + } + + TEST_F(TeamMgrTest, testProcessPidFileMissingDuringCleanup) + { + swss::TeamMgr teammgr(m_config_db.get(), m_app_db.get(), m_state_db.get(), cfg_lag_tables); + swss::Table cfg_lag_table = swss::Table(m_config_db.get(), CFG_LAG_TABLE_NAME); + cfg_lag_table.set("PortChannel198", { { "admin_status", "up" }, + { "mtu", "9100" }, + { "fallback", "true" }, + { "lacp_key", "auto" }, + { "min_links", "1" } }); + teammgr.addExistingData(&cfg_lag_table); + teammgr.doTask(); + ASSERT_NE(mockCallArgs.size(), 0); + EXPECT_NE(mockCallArgs.front().find("/usr/bin/teamd -r -t PortChannel198"), std::string::npos); + EXPECT_EQ(mockCallArgs.size(), 3); + teammgr.cleanTeamProcesses(); + EXPECT_EQ(mockKillCommands.size(), 0); + } + + TEST_F(TeamMgrTest, testSleepDuringCleanup) + { + swss::TeamMgr teammgr(m_config_db.get(), m_app_db.get(), m_state_db.get(), cfg_lag_tables); + swss::Table cfg_lag_table = 
swss::Table(m_config_db.get(), CFG_LAG_TABLE_NAME); + for (int i = 600; i < 620; i++) + { + cfg_lag_table.set(std::string("PortChannel") + std::to_string(i), { { "admin_status", "up" }, + { "mtu", "9100" }, + { "lacp_key", "auto" } }); } - ASSERT_EQ(kill_cmd_called, 1); + teammgr.addExistingData(&cfg_lag_table); + teammgr.doTask(); + ASSERT_EQ(mockCallArgs.size(), 60); + std::chrono::steady_clock::time_point begin = std::chrono::steady_clock::now(); + teammgr.cleanTeamProcesses(); + std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now(); + EXPECT_EQ(mockKillCommands.size(), 0); + EXPECT_GE(std::chrono::duration_cast(end - begin).count(), 200); } -} \ No newline at end of file +} diff --git a/tests/test_pac.py b/tests/test_pac.py new file mode 100644 index 0000000000..a913fddfc9 --- /dev/null +++ b/tests/test_pac.py @@ -0,0 +1,209 @@ +import time + +from swsscommon import swsscommon + +def create_entry(tbl, key, pairs): + fvs = swsscommon.FieldValuePairs(pairs) + tbl.set(key, fvs) + + # FIXME: better to wait until DB create them + time.sleep(1) + +def remove_entry(tbl, key): + tbl._del(key) + time.sleep(1) + +def create_entry_tbl(db, table, key, pairs): + tbl = swsscommon.Table(db, table) + create_entry(tbl, key, pairs) + +def remove_entry_tbl(db, table, key): + tbl = swsscommon.Table(db, table) + remove_entry(tbl, key) + +def create_entry_pst(db, table, key, pairs): + tbl = swsscommon.ProducerStateTable(db, table) + create_entry(tbl, key, pairs) + +def how_many_entries_exist(db, table): + tbl = swsscommon.Table(db, table) + return len(tbl.getKeys()) + +def get_port_oid(db, port_name): + port_map_tbl = swsscommon.Table(db, 'COUNTERS_PORT_NAME_MAP') + for k in port_map_tbl.get('')[1]: + if k[0] == port_name: + return k[1] + return None + +def get_bridge_port_oid(db, port_oid): + tbl = swsscommon.Table(db, "ASIC_STATE:SAI_OBJECT_TYPE_BRIDGE_PORT") + for key in tbl.getKeys(): + status, data = tbl.get(key) + assert status + values = dict(data) + if port_oid == values["SAI_BRIDGE_PORT_ATTR_PORT_ID"]: + return key + return None + +def check_learn_mode_in_asicdb(db, interface_oid, learn_mode): + # Get bridge port oid + bridge_port_oid = get_bridge_port_oid(db, interface_oid) + assert bridge_port_oid is not None + + tbl = swsscommon.Table(db, "ASIC_STATE:SAI_OBJECT_TYPE_BRIDGE_PORT") + (status, fvs) = tbl.get(bridge_port_oid) + assert status == True + values = dict(fvs) + if values["SAI_BRIDGE_PORT_ATTR_FDB_LEARNING_MODE"] == learn_mode: + return True + else: + return False + +class TestPac(object): + def test_PacvlanMemberAndFDBAddRemove(self, dvs, testlog): + dvs.setup_db() + time.sleep(2) + + vlan_before = how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN") + bp_before = how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_BRIDGE_PORT") + vm_before = how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN_MEMBER") + + # create vlan + dvs.create_vlan("2") + time.sleep(1) + + # Get bvid from vlanid + ok, bvid = dvs.get_vlan_oid(dvs.adb, "2") + assert ok, bvid + + dvs.create_vlan("3") + time.sleep(1) + + # create vlan member + dvs.create_vlan_member("3", "Ethernet0") + time.sleep(1) + + # create a Vlan member entry in Oper State DB + create_entry_tbl( + dvs.sdb, + "OPER_VLAN_MEMBER", "Vlan2|Ethernet0", + [ + ("tagging_mode", "untagged"), + ] + ) + + # check that the vlan information was propagated + vlan_after = how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN") + bp_after = how_many_entries_exist(dvs.adb, 
"ASIC_STATE:SAI_OBJECT_TYPE_BRIDGE_PORT") + vm_after = how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN_MEMBER") + + assert vlan_after - vlan_before == 2, "The Vlan2 wasn't created" + assert bp_after - bp_before == 1, "The bridge port wasn't created" + assert vm_after - vm_before == 1, "The vlan member wasn't added" + + # Add FDB entry in Oper State DB + create_entry_tbl( + dvs.sdb, + "OPER_FDB", "Vlan2|00:00:00:00:00:01", + [ + ("port", "Ethernet0"), + ("type", "dynamic"), + ("discard", "false"), + ] + ) + # Get mapping between interface name and its bridge port_id + iface_2_bridge_port_id = dvs.get_map_iface_bridge_port_id(dvs.adb) + + # check that the FDB entry was inserted into ASIC DB + ok, extra = dvs.is_fdb_entry_exists(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY", + [("mac", "00:00:00:00:00:01"), ("bvid", bvid)], + [("SAI_FDB_ENTRY_ATTR_TYPE", "SAI_FDB_ENTRY_TYPE_DYNAMIC"), + ("SAI_FDB_ENTRY_ATTR_PACKET_ACTION", "SAI_PACKET_ACTION_FORWARD"), + ("SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID", iface_2_bridge_port_id["Ethernet0"])]) + + assert ok, str(extra) + + # Remove FDB entry in Oper State DB + remove_entry_tbl( + dvs.sdb, + "OPER_FDB", "Vlan2|00:00:00:00:00:01" + ) + + # check that the FDB entry was removed from ASIC DB + ok, extra = dvs.is_fdb_entry_exists(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY", + [("mac", "00:00:00:00:00:01"), ("bvid", bvid)], []) + assert ok == False, "The fdb entry still exists in ASIC" + + # remove Vlan member entry in Oper State DB + remove_entry_tbl( + dvs.sdb, + "OPER_VLAN_MEMBER", "Vlan2|Ethernet0" + ) + # check that the vlan information was propagated + vlan_after = how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN") + bp_after = how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_BRIDGE_PORT") + vm_after = how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN_MEMBER") + + assert vlan_after - vlan_before == 2, "The Vlan2 wasn't created" + assert bp_after - bp_before == 1, "The bridge port wasn't created" + assert vm_after - vm_before == 1, "The vlan member wasn't added" + + dvs.remove_vlan("2") + dvs.remove_vlan_member("3", "Ethernet0") + dvs.remove_vlan("3") + + vlan_after = how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN") + bp_after = how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_BRIDGE_PORT") + vm_after = how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN_MEMBER") + + assert vlan_after - vlan_before == 0, "The Vlan2 wasn't removed" + assert bp_after - bp_before == 0, "The bridge port wasn't removed" + assert vm_after - vm_before == 0, "The vlan member wasn't removed" + + def test_PacPortLearnMode(self, dvs, testlog): + dvs.setup_db() + time.sleep(2) + + # create vlan + dvs.create_vlan("2") + time.sleep(1) + + # create vlan member + dvs.create_vlan_member("2", "Ethernet0") + time.sleep(1) + + cntdb = swsscommon.DBConnector(swsscommon.COUNTERS_DB, dvs.redis_sock, 0) + # get port oid + port_oid = get_port_oid(cntdb, "Ethernet0") + assert port_oid is not None + + # check asicdb before setting mac learn mode; The default learn_mode value is SAI_BRIDGE_PORT_FDB_LEARNING_MODE_HW. 
+        status = check_learn_mode_in_asicdb(dvs.adb, port_oid, "SAI_BRIDGE_PORT_FDB_LEARNING_MODE_HW")
+        assert status == True
+
+        # Set port learn mode to CPU
+        create_entry_tbl(
+            dvs.sdb,
+            "OPER_PORT", "Ethernet0",
+            [
+                ("learn_mode", "cpu_trap"),
+            ]
+        )
+        status = check_learn_mode_in_asicdb(dvs.adb, port_oid, "SAI_BRIDGE_PORT_FDB_LEARNING_MODE_CPU_TRAP")
+        assert status == True
+
+        # Set port learn mode back to default
+        remove_entry_tbl(
+            dvs.sdb,
+            "OPER_PORT", "Ethernet0"
+        )
+        status = check_learn_mode_in_asicdb(dvs.adb, port_oid, "SAI_BRIDGE_PORT_FDB_LEARNING_MODE_HW")
+        assert status == True
+        dvs.remove_vlan_member("2", "Ethernet0")
+        dvs.remove_vlan("2")
+
+# Add a dummy always-pass test at the end as a workaround
+# for an issue where, if Flaky fails on the final test, it invokes module tear-down before retrying
+def test_nonflaky_dummy():
+    pass
diff --git a/tests/test_srv6.py b/tests/test_srv6.py
index ee84c9a386..23ca764b64 100644
--- a/tests/test_srv6.py
+++ b/tests/test_srv6.py
@@ -21,6 +21,13 @@ def get_created_entry(db, table, existed_entries):
     assert len(new_entries) == 1, "Wrong number of created entries."
     return new_entries[0]
 
+def get_created_entries(db, table, existed_entries, number):
+    tbl = swsscommon.Table(db, table)
+    entries = set(tbl.getKeys())
+    new_entries = list(entries - existed_entries)
+    assert len(new_entries) == number, "Wrong number of created entries."
+    return new_entries
+
 class TestSrv6Mysid(object):
     def setup_db(self, dvs):
         self.pdb = dvs.get_app_db()
@@ -313,7 +320,7 @@ def test_mysid(self, dvs, testlog):
             if fv[0] == "SAI_MY_SID_ENTRY_ATTR_NEXT_HOP_ID":
                 assert fv[1] == next_hop_ipv4_id
             if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR":
-                assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX4"
+                assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UDX4"
             elif fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR_FLAVOR":
                 assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD"
 
@@ -331,7 +338,7 @@ def test_mysid(self, dvs, testlog):
             if fv[0] == "SAI_MY_SID_ENTRY_ATTR_NEXT_HOP_ID":
                 assert fv[1] == next_hop_ipv6_id
             if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR":
-                assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX6"
+                assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UDX6"
             elif fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR_FLAVOR":
                 assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD"
 
@@ -478,7 +485,7 @@ def create_srv6_route(self, routeip,segname,segsrc):
         table = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY"
         existed_entries = get_exist_entries(self.adb.db_connection, table)
 
-        fvs=swsscommon.FieldValuePairs([('seg_src',segsrc),('segment',segname)])
+        fvs=swsscommon.FieldValuePairs([('seg_src',segsrc), ('segment',segname)])
         routetbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "ROUTE_TABLE")
         routetbl.set(routeip,fvs)
@@ -821,7 +828,7 @@ def test_AddRemoveSrv6MySidEnd(self, dvs, testlog):
         self.setup_srv6(dvs)
 
         # configure srv6 locator
-        dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"srv6\" -c \"locators\" -c \"locator loc1\" -c \"prefix fc00:0:1::/64 block-len 32 node-len 16 func-bits 16\"")
+        dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"srv6\" -c \"locators\" -c \"locator loc1\" -c \"prefix fc00:0:1::/48 block-len 32 node-len 16 func-bits 16\"")
 
         # create srv6 mysid end behavior
         dvs.runcmd("ip -6 route add fc00:0:1:64::/128 encap seg6local action End dev sr0")
@@ -866,7 +873,7 @@ def test_AddRemoveSrv6MySidEndX(self, dvs, testlog):
         self.setup_srv6(dvs)
 
         # configure srv6 
locator - dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"srv6\" -c \"locators\" -c \"locator loc1\" -c \"prefix fc00:0:1::/64 block-len 32 node-len 16 func-bits 16\"") + dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"srv6\" -c \"locators\" -c \"locator loc1\" -c \"prefix fc00:0:1::/48 block-len 32 node-len 16 func-bits 16\"") # create srv6 mysid end.x behavior dvs.runcmd("ip -6 route add fc00:0:1:65::/128 encap seg6local action End.X nh6 2001::1 dev sr0") @@ -920,7 +927,7 @@ def test_AddRemoveSrv6MySidEndDT4(self, dvs, testlog): dvs.runcmd("sysctl -w net.vrf.strict_mode=1") # configure srv6 locator - dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"srv6\" -c \"locators\" -c \"locator loc1\" -c \"prefix fc00:0:1::/64 block-len 32 node-len 16 func-bits 16\"") + dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"srv6\" -c \"locators\" -c \"locator loc1\" -c \"prefix fc00:0:1::/48 block-len 32 node-len 16 func-bits 16\"") # create srv6 mysid end.dt4 behavior dvs.runcmd("ip -6 route add fc00:0:1:6b::/128 encap seg6local action End.DT4 vrftable {} dev sr0".format(self.vrf_table_id)) @@ -965,7 +972,7 @@ def test_AddRemoveSrv6MySidEndDT6(self, dvs, testlog): self.setup_srv6(dvs) # configure srv6 locator - dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"srv6\" -c \"locators\" -c \"locator loc1\" -c \"prefix fc00:0:1::/64 block-len 32 node-len 16 func-bits 16\"") + dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"srv6\" -c \"locators\" -c \"locator loc1\" -c \"prefix fc00:0:1::/48 block-len 32 node-len 16 func-bits 16\"") # create srv6 mysid end.dt6 behavior dvs.runcmd("ip -6 route add fc00:0:1:6b::/128 encap seg6local action End.DT6 vrftable {} dev sr0".format(self.vrf_table_id)) @@ -1017,7 +1024,7 @@ def test_AddRemoveSrv6MySidEndDT46(self, dvs, testlog): dvs.runcmd("sysctl -w net.vrf.strict_mode=1") # configure srv6 locator - dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"srv6\" -c \"locators\" -c \"locator loc1\" -c \"prefix fc00:0:1::/64 block-len 32 node-len 16 func-bits 16\"") + dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"srv6\" -c \"locators\" -c \"locator loc1\" -c \"prefix fc00:0:1::/48 block-len 32 node-len 16 func-bits 16\"") # create srv6 mysid end.dt46 behavior dvs.runcmd("ip -6 route add fc00:0:1:6b::/128 encap seg6local action End.DT46 vrftable {} dev sr0".format(self.vrf_table_id)) @@ -1181,7 +1188,7 @@ def test_AddRemoveSrv6MySidUDT4(self, dvs, testlog): assert status == True for fv in fvs: if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": - assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT4" + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UDT4" if fv[0] == "SAI_MY_SID_ENTRY_ATTR_VRF": assert fv[1] == self.vrf_id @@ -1228,7 +1235,7 @@ def test_AddRemoveSrv6MySidUDT6(self, dvs, testlog): assert status == True for fv in fvs: if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": - assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT6" + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UDT6" if fv[0] == "SAI_MY_SID_ENTRY_ATTR_VRF": assert fv[1] == self.vrf_id @@ -1278,7 +1285,7 @@ def test_AddRemoveSrv6MySidUDT46(self, dvs, testlog): assert status == True for fv in fvs: if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": - assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT46" + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UDT46" if fv[0] == 
"SAI_MY_SID_ENTRY_ATTR_VRF": assert fv[1] == self.vrf_id @@ -1548,6 +1555,520 @@ def test_AddRemoveSrv6SteeringRouteIpv6(self, dvs, testlog): self.teardown_srv6(dvs) +class TestSrv6Vpn(object): + def setup_db(self, dvs): + self.pdb = dvs.get_app_db() + self.adb = dvs.get_asic_db() + self.cdb = dvs.get_config_db() + + def create_srv6_vpn_route(self, routeip, nexthop, segsrc, vpn_sid, ifname): + table = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY" + existed_entries = get_exist_entries(self.adb.db_connection, table) + + fvs=swsscommon.FieldValuePairs([('seg_src', segsrc), ('nexthop', nexthop), ('vpn_sid', vpn_sid), ('ifname', ifname)]) + routetbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "ROUTE_TABLE") + routetbl.set(routeip,fvs) + + self.adb.wait_for_n_keys(table, len(existed_entries) + 1) + return get_created_entry(self.adb.db_connection, table, existed_entries) + + def create_srv6_vpn_route_with_nhg(self, routeip, nhg_index, pic_ctx_index): + table = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY" + existed_entries = get_exist_entries(self.adb.db_connection, table) + + fvs=swsscommon.FieldValuePairs([('nexthop_group', nhg_index), ('pic_context_id', pic_ctx_index)]) + routetbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "ROUTE_TABLE") + routetbl.set(routeip,fvs) + + self.adb.wait_for_n_keys(table, len(existed_entries) + 1) + return get_created_entry(self.adb.db_connection, table, existed_entries) + + def update_srv6_vpn_route_attribute_with_nhg(self, routeip, nhg_index, pic_ctx_index): + fvs=swsscommon.FieldValuePairs([('nexthop_group', nhg_index), ('pic_context_id', pic_ctx_index)]) + routetbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "ROUTE_TABLE") + routetbl.set(routeip,fvs) + return True + + def update_srv6_vpn_route_attribute(self, routeip, nexthops, segsrc_list, vpn_list, ifname_list): + fvs=swsscommon.FieldValuePairs([('seg_src', ",".join(segsrc_list)), ('nexthop', ",".join(nexthops)), ('vpn_sid', ",".join(vpn_list)), ('ifname', ",".join(ifname_list))]) + routetbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "ROUTE_TABLE") + routetbl.set(routeip,fvs) + return True + + def remove_srv6_route(self, routeip): + routetbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "ROUTE_TABLE") + routetbl._del(routeip) + + def create_nhg(self, nhg_index, nexthops, segsrc_list, ifname_list): + table = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP" + existed_entries = get_exist_entries(self.adb.db_connection, table) + + fvs=swsscommon.FieldValuePairs([('seg_src', ",".join(segsrc_list)), ('nexthop', ",".join(nexthops)), ('ifname', ",".join(ifname_list))]) + nhgtbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "NEXTHOP_GROUP_TABLE") + nhgtbl.set(nhg_index,fvs) + + self.adb.wait_for_n_keys(table, len(existed_entries) + 1) + return get_created_entry(self.adb.db_connection, table, existed_entries) + + def remove_nhg(self, nhg_index): + nhgtbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "NEXTHOP_GROUP_TABLE") + nhgtbl._del(nhg_index) + + def create_pic_context(self, pic_ctx_id, nexthops, vpn_list): + table = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY" + existed_entries = get_exist_entries(self.adb.db_connection, table) + + fvs=swsscommon.FieldValuePairs([('nexthop', ",".join(nexthops)), ('vpn_sid', ",".join(vpn_list))]) + pictbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "PIC_CONTEXT_TABLE") + pictbl.set(pic_ctx_id,fvs) + + self.adb.wait_for_n_keys(table, len(existed_entries) + len(vpn_list)) + return 
get_created_entries(self.adb.db_connection, table, existed_entries, len(vpn_list)) + + def remove_pic_context(self, pic_ctx_id): + pictbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "PIC_CONTEXT_TABLE") + pictbl._del(pic_ctx_id) + + def check_deleted_route_entries(self, destinations): + def _access_function(): + route_entries = self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + route_destinations = [json.loads(route_entry)["dest"] for route_entry in route_entries] + return (all(destination not in route_destinations for destination in destinations), None) + + wait_for_result(_access_function) + + def test_srv6_vpn_with_single_nh(self, dvs, testlog): + self.setup_db(dvs) + dvs.setup_db() + + # save exist asic db entries + tunnel_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL") + nexthop_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") + map_entry_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY") + map_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP") + + # create v4 route with vpn sid + route_key = self.create_srv6_vpn_route('5000::/64', '2001::1', '1001:2000::1', '3000::1', 'unknown') + nexthop_id = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", nexthop_entries) + tunnel_id = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL", tunnel_entries) + map_entry_id = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY", map_entry_entries) + map_id = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP", map_entries) + prefix_agg_id = "1" + + # check ASIC SAI_OBJECT_TYPE_ROUTE_ENTRY database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + (status, fvs) = tbl.get(route_key) + assert status == True + for fv in fvs: + if fv[0] == "SAI_ROUTE_ENTRY_ATTR_PREFIX_AGG_ID": + assert prefix_agg_id == fv[1] + + # check ASIC SAI_OBJECT_TYPE_TUNNEL_MAP database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP") + (status, fvs) = tbl.get(map_id) + assert status == True + for fv in fvs: + if fv[0] == "SAI_TUNNEL_MAP_ATTR_TYPE": + assert fv[1] == "SAI_TUNNEL_MAP_TYPE_PREFIX_AGG_ID_TO_SRV6_VPN_SID" + + # check ASIC SAI_OBJECT_TYPE_TUNNEL database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL") + (status, fvs) = tbl.get(tunnel_id) + assert status == True + for fv in fvs: + if fv[0] == "SAI_TUNNEL_ATTR_PEER_MODE": + assert fv[1] == "SAI_TUNNEL_PEER_MODE_P2P" + + # check vpn sid value in SRv6 route is created + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY") + (status, fvs) = tbl.get(map_entry_id) + assert status == True + for fv in fvs: + if fv[0] == "SAI_TUNNEL_MAP_ENTRY_ATTR_SRV6_VPN_SID_VALUE": + assert fv[1] == "3000::1" + if fv[0] == "SAI_TUNNEL_MAP_ENTRY_ATTR_PREFIX_AGG_ID_KEY": + assert fv[1] == prefix_agg_id + + # check sid list value in ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP is created + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") + (status, fvs) = tbl.get(nexthop_id) + assert status == True + for fv in fvs: + if fv[0] == "SAI_NEXT_HOP_ATTR_TYPE": + assert fv[1] == "SAI_NEXT_HOP_TYPE_SRV6_SIDLIST" + + self.remove_srv6_route('5000::/64') + self.check_deleted_route_entries('5000::/64') + 
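+        # Give orchagent a moment to tear down the tunnel, tunnel map, map entry
+        # and nexthop objects behind the VPN SID before asserting they are gone.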
time.sleep(5) + # check ASIC SAI_OBJECT_TYPE_TUNNEL_MAP is removed + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP") + (status, fvs) = tbl.get(map_id) + assert status == False + + # check ASIC SAI_OBJECT_TYPE_TUNNEL is removed + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL") + (status, fvs) = tbl.get(tunnel_id) + assert status == False + + # check vpn sid value in SRv6 route is removed + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY") + (status, fvs) = tbl.get(map_entry_id) + assert status == False + + # check nexthop id in SRv6 route is removed + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") + (status, fvs) = tbl.get(nexthop_id) + assert status == False + + # check ASIC SAI_OBJECT_TYPE_ROUTE_ENTRY is removed + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + (status, fvs) = tbl.get(route_key) + assert status == False + + def test_pic(self, dvs, testlog): + self.setup_db(dvs) + dvs.setup_db() + + segsrc_list = [] + nexthop_list = [] + ifname_list = [] + vpn_list = [] + nhg_index = '100' + pic_ctx_index = '200' + + # save exist asic db entries + tunnel_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL") + nexthop_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") + map_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP") + nexthop_group_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP") + nexthop_group_member_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER") + map_entry_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY") + + segsrc_list.append('1001:2000::1') + segsrc_list.append('1001:2000::1') + + nexthop_list.append('2000::1') + nexthop_list.append('2000::2') + + ifname_list.append('unknown') + ifname_list.append('unknown') + + vpn_list.append('3000::1') + vpn_list.append('3000::2') + + self.create_nhg(nhg_index, nexthop_list, segsrc_list, ifname_list) + tunnel_ids = get_created_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL", tunnel_entries, 2) + nh_ids = get_created_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", nexthop_entries, 2) + nhg_id = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP", nexthop_group_entries) + nhg_mem = get_created_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER", nexthop_group_member_entries, 2) + map_ids = get_created_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP", map_entries, 2) + + nh_ids = sorted(nh_ids) + nhg_mem = sorted(nhg_mem) + + # check ASIC SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER") + (status, fvs) = tbl.get(nhg_mem[0]) + assert status == True + for fv in fvs: + if fv[0] == "SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID": + assert fv[1] == nhg_id + elif fv[0] == "SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID": + assert fv[1] == nh_ids[0] + + # check ASIC SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER") + (status, fvs) = tbl.get(nhg_mem[1]) + assert 
status == True + for fv in fvs: + if fv[0] == "SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID": + assert fv[1] == nhg_id + elif fv[0] == "SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID": + assert fv[1] == nh_ids[1] + + # check ASIC SAI_OBJECT_TYPE_TUNNEL_MAP database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP") + for map_id in map_ids: + (status, fvs) = tbl.get(map_id) + assert status == True + for fv in fvs: + if fv[0] == "SAI_TUNNEL_MAP_ATTR_TYPE": + assert fv[1] == "SAI_TUNNEL_MAP_TYPE_PREFIX_AGG_ID_TO_SRV6_VPN_SID" + + # check ASIC SAI_OBJECT_TYPE_TUNNEL database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL") + for tunnel_id in tunnel_ids: + (status, fvs) = tbl.get(tunnel_id) + assert status == True + for fv in fvs: + if fv[0] == "SAI_TUNNEL_ATTR_PEER_MODE": + assert fv[1] == "SAI_TUNNEL_PEER_MODE_P2P" + + # check sid list value in ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP is created + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") + for nh_id in nh_ids: + (status, fvs) = tbl.get(nh_id) + assert status == True + for fv in fvs: + if fv[0] == "SAI_NEXT_HOP_ATTR_TYPE": + assert fv[1] == "SAI_NEXT_HOP_TYPE_SRV6_SIDLIST" + + self.create_pic_context(pic_ctx_index, nexthop_list, vpn_list) + map_entry_ids = get_created_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY", map_entry_entries, 2) + prefix_agg_id = "1" + + # check vpn sid value in SRv6 route is created + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY") + for map_entry_id in map_entry_ids: + (status, fvs) = tbl.get(map_entry_id) + assert status == True + for fv in fvs: + if fv[0] == "SAI_TUNNEL_MAP_ENTRY_ATTR_PREFIX_AGG_ID_KEY": + assert fv[1] == prefix_agg_id + + # remove nhg and pic_context + self.remove_nhg(nhg_index) + self.remove_pic_context(pic_ctx_index) + + time.sleep(5) + # check ASIC SAI_OBJECT_TYPE_NEXT_HOP_GROUP is removed + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP") + (status, fvs) = tbl.get(nhg_id) + assert status == False + + # check ASIC SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER is removed + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER") + for nhg_mem_id in nhg_mem: + (status, fvs) = tbl.get(nhg_mem_id) + assert status == False + + # check ASIC SAI_OBJECT_TYPE_TUNNEL_MAP is removed + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP") + for map_id in map_ids: + (status, fvs) = tbl.get(map_id) + assert status == False + + # check ASIC SAI_OBJECT_TYPE_TUNNEL is removed + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL") + for tunnel_id in tunnel_ids: + (status, fvs) = tbl.get(tunnel_id) + assert status == False + + # check next hop in ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP is removed + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") + for nh_id in nh_ids: + (status, fvs) = tbl.get(nh_id) + assert status == False + + # check vpn sid value in SRv6 route is removed + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY") + for map_entry_id in map_entry_ids: + (status, fvs) = tbl.get(map_entry_id) + assert status == False + + def test_srv6_vpn_with_nhg(self, dvs, testlog): + self.setup_db(dvs) + dvs.setup_db() + + segsrc_list = [] + nexthop_list = [] + vpn_list = [] + ifname_list = [] + nhg_index = '100' 
+ pic_ctx_index = '200' + + # save exist asic db entries + nexthop_group_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP") + + segsrc_list.append('1001:2000::1') + segsrc_list.append('1001:2000::1') + + nexthop_list.append('2000::1') + nexthop_list.append('2000::2') + + vpn_list.append('3000::1') + vpn_list.append('3000::2') + + ifname_list.append('unknown') + ifname_list.append('unknown') + + self.create_nhg(nhg_index, nexthop_list, segsrc_list, ifname_list) + self.create_pic_context(pic_ctx_index, nexthop_list, vpn_list) + route_key = self.create_srv6_vpn_route_with_nhg('5000::/64', nhg_index, pic_ctx_index) + + nhg_id = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP", nexthop_group_entries) + prefix_agg_id = "1" + + # check ASIC SAI_OBJECT_TYPE_ROUTE_ENTRY database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + (status, fvs) = tbl.get(route_key) + assert status == True + for fv in fvs: + if fv[0] == "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": + assert fv[1] == nhg_id + if fv[0] == "SAI_ROUTE_ENTRY_ATTR_PREFIX_AGG_ID": + assert fv[1] == prefix_agg_id + + route_key_new = self.create_srv6_vpn_route_with_nhg('5001::/64', nhg_index, pic_ctx_index) + + # check ASIC SAI_OBJECT_TYPE_ROUTE_ENTRY database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + (status, fvs) = tbl.get(route_key_new) + assert status == True + for fv in fvs: + if fv[0] == "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": + assert fv[1] == nhg_id + if fv[0] == "SAI_ROUTE_ENTRY_ATTR_PREFIX_AGG_ID": + assert fv[1] == prefix_agg_id + + # remove routes + self.remove_srv6_route('5001::/64') + self.check_deleted_route_entries('5001::/64') + + time.sleep(5) + # check ASIC SAI_OBJECT_TYPE_ROUTE_ENTRY is removed + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + (status, fvs) = tbl.get(route_key_new) + assert status == False + + # remove routes + self.remove_srv6_route('5000::/64') + self.check_deleted_route_entries('5000::/64') + + time.sleep(5) + # check ASIC SAI_OBJECT_TYPE_ROUTE_ENTRY is removed + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + (status, fvs) = tbl.get(route_key) + assert status == False + + # remove nhg and pic_context + self.remove_nhg(nhg_index) + self.remove_pic_context(pic_ctx_index) + + def test_srv6_vpn_nh_update(self, dvs, testlog): + self.setup_db(dvs) + dvs.setup_db() + + segsrc_list = [] + nexthop_list = [] + vpn_list = [] + ifname_list = [] + + # save exist asic db entries + nexthop_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") + map_entry_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY") + + nexthop_group_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP") + nexthop_group_member_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER") + map_entry_prefix_agg_id = "1" + route_entry_prefix_agg_id = "1" + route_entry_next_hop_id = "1" + + # create v4 route with vpn sid + route_key = self.create_srv6_vpn_route('5000::/64', '2000::1', '1001:2000::1', '3000::1', 'unknown') + map_entry_id = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY", map_entry_entries) + + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY") 
+ (status, fvs) = tbl.get(map_entry_id) + assert status == True + for fv in fvs: + if fv[0] == "SAI_TUNNEL_MAP_ENTRY_ATTR_PREFIX_AGG_ID_KEY": + map_entry_prefix_agg_id = fv[1] + + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + (status, fvs) = tbl.get(route_key) + assert status == True + for fv in fvs: + if fv[0] == "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": + route_entry_next_hop_id = fv[1] + if fv[0] == "SAI_ROUTE_ENTRY_ATTR_PREFIX_AGG_ID": + route_entry_prefix_agg_id = fv[1] + + segsrc_list.append('1001:2000::1') + segsrc_list.append('1001:2000::1') + + nexthop_list.append('2000::1') + nexthop_list.append('2000::2') + + vpn_list.append('3000::1') + vpn_list.append('3000::2') + + ifname_list.append('unknown') + ifname_list.append('unknown') + + nhg_index = '100' + pic_ctx_index = '200' + + map_entry_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY") + + self.create_nhg(nhg_index, nexthop_list, segsrc_list, ifname_list) + self.create_pic_context(pic_ctx_index, nexthop_list, vpn_list) + self.update_srv6_vpn_route_attribute_with_nhg('5000::/64', nhg_index, pic_ctx_index) + + time.sleep(5) + nh_ids = get_created_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", nexthop_entries, 2) + nhg_id = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP", nexthop_group_entries) + nhg_mem = get_created_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER", nexthop_group_member_entries, 2) + + map_entry_ids = get_created_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY", map_entry_entries, 2) + map_entry_id_group = "1" + + for map_id in map_entry_ids: + if map_id != map_entry_id: + map_entry_id_group = map_id + break + + nh_ids = sorted(nh_ids) + nhg_mem = sorted(nhg_mem) + + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY") + (status, fvs) = tbl.get(map_entry_id_group) + assert status == True + for fv in fvs: + if fv[0] == "SAI_TUNNEL_MAP_ENTRY_ATTR_PREFIX_AGG_ID_KEY": + assert fv[1] != map_entry_prefix_agg_id + + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP") + (status, fvs) = tbl.get(nhg_id) + assert status == True + + # check ASIC SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER") + (status, fvs) = tbl.get(nhg_mem[0]) + assert status == True + for fv in fvs: + if fv[0] == "SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID": + assert fv[1] == nhg_id + elif fv[0] == "SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID": + assert fv[1] == nh_ids[0] + + # check ASIC SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER") + (status, fvs) = tbl.get(nhg_mem[1]) + assert status == True + for fv in fvs: + if fv[0] == "SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID": + assert fv[1] == nhg_id + elif fv[0] == "SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID": + assert fv[1] == nh_ids[1] + + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + (status, fvs) = tbl.get(route_key) + assert status == True + for fv in fvs: + if fv[0] == "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": + assert fv[1] != route_entry_next_hop_id + if fv[0] == "SAI_ROUTE_ENTRY_ATTR_PREFIX_AGG_ID": + assert fv[1] != route_entry_prefix_agg_id + + # remove routes + 
self.remove_srv6_route('5000::/64') + self.check_deleted_route_entries('5000::/64') + time.sleep(5) + + # remove nhg and pic_context + self.remove_nhg(nhg_index) + self.remove_pic_context(pic_ctx_index) # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying diff --git a/tests/test_sub_port_intf.py b/tests/test_sub_port_intf.py index ec76ec13bb..3c9edea5c6 100644 --- a/tests/test_sub_port_intf.py +++ b/tests/test_sub_port_intf.py @@ -393,7 +393,7 @@ def get_default_vrf_oid(self): assert len(oids) == 1, "Wrong # of default vrfs: %d, expected #: 1." % (len(oids)) return oids[0] - def get_ip_prefix_nhg_oid(self, ip_prefix, vrf_oid=None): + def get_ip_prefix_nhg_oid(self, ip_prefix, vrf_oid=None, prefix_present=True): if vrf_oid is None: vrf_oid = self.default_vrf_oid @@ -407,18 +407,24 @@ def _access_function(): route_entry_found = True assert route_entry_key["vr"] == vrf_oid break - - return (route_entry_found, raw_route_entry_key) + if prefix_present: + return (route_entry_found, raw_route_entry_key) + else: + return (not route_entry_found, None) (route_entry_found, raw_route_entry_key) = wait_for_result(_access_function) - fvs = self.asic_db.get_entry(ASIC_ROUTE_ENTRY_TABLE, raw_route_entry_key) + if not prefix_present: + assert raw_route_entry_key == None + return None + else: + fvs = self.asic_db.get_entry(ASIC_ROUTE_ENTRY_TABLE, raw_route_entry_key) - nhg_oid = fvs.get("SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID", "") - assert nhg_oid != "" - assert nhg_oid != "oid:0x0" + nhg_oid = fvs.get("SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID", "") + assert nhg_oid != "" + assert nhg_oid != "oid:0x0" - return nhg_oid + return nhg_oid def check_sub_port_intf_key_existence(self, db, table_name, key): db.wait_for_matching_keys(table_name, [key]) @@ -1543,21 +1549,26 @@ def _test_sub_port_intf_oper_down_with_pending_neigh_route_tasks(self, dvs, sub_ self.add_route_appl_db(ip_prefix, nhop_ips, ifnames, vrf_name) # Verify route entry created in ASIC_DB and get next hop group oid - nhg_oid = self.get_ip_prefix_nhg_oid(ip_prefix, vrf_oid) - - # Verify next hop group of the specified oid created in ASIC_DB - self.check_sub_port_intf_key_existence(self.asic_db, ASIC_NEXT_HOP_GROUP_TABLE, nhg_oid) + nhg_oid = self.get_ip_prefix_nhg_oid(ip_prefix, vrf_oid, prefix_present = i < (nhop_num - 1)) - # Verify next hop group member # created in ASIC_DB - nhg_member_oids = self.asic_db.wait_for_n_keys(ASIC_NEXT_HOP_GROUP_MEMBER_TABLE, - (nhop_num - 1) - i if create_intf_on_parent_port == False else ((nhop_num - 1) - i) * 2) + if i < (nhop_num - 1): + # Verify next hop group of the specified oid created in ASIC_DB + self.check_sub_port_intf_key_existence(self.asic_db, ASIC_NEXT_HOP_GROUP_TABLE, nhg_oid) - # Verify that next hop group members all belong to the next hop group of the specified oid - fv_dict = { - "SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID": nhg_oid, - } - for nhg_member_oid in nhg_member_oids: - self.check_sub_port_intf_fvs(self.asic_db, ASIC_NEXT_HOP_GROUP_MEMBER_TABLE, nhg_member_oid, fv_dict) + # Verify next hop group member # created in ASIC_DB + nhg_member_oids = self.asic_db.wait_for_n_keys(ASIC_NEXT_HOP_GROUP_MEMBER_TABLE, + (nhop_num - 1) - i if create_intf_on_parent_port == False \ + else ((nhop_num - 1) - i) * 2) + + # Verify that next hop group members all belong to the next hop group of the specified oid + fv_dict = { + "SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID": nhg_oid, + } + for nhg_member_oid in nhg_member_oids: + 
self.check_sub_port_intf_fvs(self.asic_db, ASIC_NEXT_HOP_GROUP_MEMBER_TABLE, nhg_member_oid, fv_dict) + else: + assert nhg_oid == None + self.asic_db.wait_for_n_keys(ASIC_NEXT_HOP_GROUP_MEMBER_TABLE, 0) nhop_cnt = len(self.asic_db.get_keys(ASIC_NEXT_HOP_TABLE)) # Remove next hop objects on sub port interfaces diff --git a/tests/test_vlan.py b/tests/test_vlan.py index 28d3de3a29..b8a277f282 100644 --- a/tests/test_vlan.py +++ b/tests/test_vlan.py @@ -1,9 +1,25 @@ import distro import pytest +import ipaddress +import time from distutils.version import StrictVersion from dvslib.dvs_common import PollingConfig, wait_for_result + +def mac_to_link_local_ipv6(mac): + mac_bytes = mac.split(':') + mac_bytes = mac_bytes[:3] + ["ff", "fe"] + mac_bytes[3:] + second_digit = int(mac_bytes[0][1], 16) + second_digit ^= 0x2 # Reverse the second bit from right + mac_bytes[0] = mac_bytes[0][0] + format(second_digit, "x") + ipv6 = ["fe80:"] + for i in range(0, 7, 2): + ipv6 += [":", mac_bytes[i], mac_bytes[i + 1]] + ipv6 = "".join(ipv6) + return str(ipaddress.IPv6Address(ipv6)) # Conversion to IPv6Address is done to compress ipv6. + + @pytest.mark.usefixtures("testlog") @pytest.mark.usefixtures('dvs_vlan_manager') @pytest.mark.usefixtures('dvs_lag_manager') @@ -233,8 +249,58 @@ def test_AddPortChannelToVlan(self, dvs): self.dvs_vlan.create_vlan(vlan) self.dvs_vlan.get_and_verify_vlan_ids(1) + + self.dvs_vlan.create_vlan_member(vlan, lag_interface, "tagged") + self.dvs_vlan.get_and_verify_vlan_member_ids(1) + + self.dvs_vlan.remove_vlan_member(vlan, lag_interface) + self.dvs_vlan.get_and_verify_vlan_member_ids(0) + + self.dvs_vlan.remove_vlan(vlan) + self.dvs_vlan.get_and_verify_vlan_ids(0) + + self.dvs_lag.remove_port_channel_member(lag_id, lag_member) + self.dvs_vlan.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_LAG_MEMBER", 0) + + self.dvs_lag.remove_port_channel(lag_id) + self.dvs_vlan.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_LAG", 0) + + def test_AddPortChannelToVlanRaceCondition(self, dvs): + + vlan = "2" + lag_member = "Ethernet0" + lag_id = "0001" + lag_interface = "PortChannel{}".format(lag_id) + + self.dvs_lag.create_port_channel(lag_id) + lag_entries = self.dvs_vlan.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_LAG", 1) + + self.dvs_lag.create_port_channel_member(lag_id, lag_member) + + # Verify the LAG has been initialized properly + lag_member_entries = self.dvs_vlan.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_LAG_MEMBER", 1) + fvs = self.dvs_vlan.asic_db.wait_for_entry("ASIC_STATE:SAI_OBJECT_TYPE_LAG_MEMBER", lag_member_entries[0]) + assert len(fvs) == 4 + assert fvs.get("SAI_LAG_MEMBER_ATTR_LAG_ID") == lag_entries[0] + assert self.dvs_vlan.asic_db.port_to_id_map[fvs.get("SAI_LAG_MEMBER_ATTR_PORT_ID")] == lag_member + + self.dvs_vlan.create_vlan(vlan) + self.dvs_vlan.get_and_verify_vlan_ids(1) + # Kill teamsyncd + dvs.stop_teamsyncd() + + # Delete netdevice + dvs.runcmd("ip link del PortChannel" + lag_id) self.dvs_vlan.create_vlan_member(vlan, lag_interface, "tagged") + + self.dvs_vlan.get_and_verify_vlan_member_ids(0) + #Start teamsyncd + dvs.start_teamsyncd() + + #Start teammgrd + dvs.restart_teammgrd() + self.dvs_vlan.get_and_verify_vlan_member_ids(1) self.dvs_vlan.remove_vlan_member(vlan, lag_interface) @@ -249,6 +315,8 @@ def test_AddPortChannelToVlan(self, dvs): self.dvs_lag.remove_port_channel(lag_id) self.dvs_vlan.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_LAG", 0) + + def test_AddVlanMemberWithNonExistVlan(self, dvs): vlan = "2" @@ -534,6 +602,50 
@@ def test_VlanMemberLinkDown(self, dvs): self.dvs_vlan.remove_vlan(vlan) self.dvs_vlan.get_and_verify_vlan_ids(0) + def test_MacMatchesLinkLocalIPv6(self, dvs): + """ + Checks whether the MAC addresses assigned to the Bridge, dummy, Vlan1000 and Ethernet4 + interfaces match their corresponding interface's link-local IPv6 address. + """ + dvs.setup_db() + + vlan_id = "1000" + vlan_member = "Ethernet4" + vlan_interface = f"Vlan{vlan_id}" + vlan_mac = "00:aa:bb:cc:dd:ee" + + assert dvs.get_interface_oper_status("Bridge") == "UP" + assert dvs.get_interface_oper_status("dummy") != "DOWN" + + bridge_mac = dvs.get_interface_mac("Bridge") + assert mac_to_link_local_ipv6(bridge_mac) == dvs.get_interface_link_local_ipv6("Bridge") + + dummy_mac = dvs.get_interface_mac("dummy") + assert mac_to_link_local_ipv6(dummy_mac) == dvs.get_interface_link_local_ipv6("dummy") + + self.dvs_vlan.create_vlan_with_mac(vlan_id, vlan_mac) + time.sleep(1) + assert dvs.get_interface_oper_status(vlan_interface) == "UP" + # The MAC address of the Bridge is expected to have changed, so we need to check again. + bridge_mac = dvs.get_interface_mac("Bridge") + assert mac_to_link_local_ipv6(bridge_mac) == dvs.get_interface_link_local_ipv6("Bridge") + vlan_mac = dvs.get_interface_mac(vlan_interface) + assert mac_to_link_local_ipv6(vlan_mac) == dvs.get_interface_link_local_ipv6(vlan_interface) + + self.dvs_vlan.create_vlan_member(vlan_interface, vlan_member) + time.sleep(1) + dvs.set_interface_status(vlan_member, "up") + assert dvs.get_interface_oper_status(vlan_member) != "DOWN" + member_mac = dvs.get_interface_mac(vlan_member) + assert mac_to_link_local_ipv6(member_mac) == dvs.get_interface_link_local_ipv6(vlan_member) + + # Tear down + dvs.set_interface_status(vlan_member, "down") + self.dvs_vlan.remove_vlan_member(vlan_interface, vlan_member) + self.dvs_vlan.remove_vlan(vlan_interface) + time.sleep(1) + + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying def test_nonflaky_dummy(): diff --git a/tests/test_vxlan_tunnel.py b/tests/test_vxlan_tunnel.py index d296fcc741..82de242fb8 100644 --- a/tests/test_vxlan_tunnel.py +++ b/tests/test_vxlan_tunnel.py @@ -152,7 +152,9 @@ def check_vxlan_tunnel(dvs, src_ip, dst_ip, tunnel_map_ids, tunnel_map_entry_ids 'SAI_TUNNEL_ATTR_DECAP_MAPPERS': decapstr, 'SAI_TUNNEL_ATTR_ENCAP_MAPPERS': encapstr, 'SAI_TUNNEL_ATTR_PEER_MODE': 'SAI_TUNNEL_PEER_MODE_P2MP', - 'SAI_TUNNEL_ATTR_ENCAP_SRC_IP': src_ip + 'SAI_TUNNEL_ATTR_ENCAP_SRC_IP': src_ip, + 'SAI_TUNNEL_ATTR_ENCAP_TTL_MODE': 'SAI_TUNNEL_TTL_MODE_PIPE_MODEL', + 'SAI_TUNNEL_ATTR_ENCAP_TTL_VAL': '255' } ) diff --git a/tests/test_zmq.py b/tests/test_zmq.py index 0f7e5359f3..6312c443d2 100644 --- a/tests/test_zmq.py +++ b/tests/test_zmq.py @@ -120,3 +120,37 @@ def test_vrf(self, dvs): dvs.runcmd("cp /usr/bin/orchagent.sh_vrf_ut_backup /usr/bin/orchagent.sh") dvs.stop_swss() dvs.start_swss() + + def test_heartbeat(self, dvs): + # Improve test code coverage, change orchagent to disable heartbeat + dvs.runcmd("cp /usr/bin/orchagent.sh /usr/bin/orchagent.sh_hb_ut_backup") + dvs.runcmd("sed -i.bak 's/\/usr\/bin\/orchagent /\/usr\/bin\/orchagent -I 0 /g' /usr/bin/orchagent.sh") + dvs.stop_swss() + dvs.start_swss() + + # wait orchagent start + time.sleep(3) + process_statue = dvs.runcmd("ps -ef") + zmq_logger.debug("Process status: {}".format(process_statue)) + + # revert change + dvs.runcmd("cp /usr/bin/orchagent.sh_hb_ut_backup /usr/bin/orchagent.sh") + dvs.stop_swss() 
+ dvs.start_swss() + + def test_usage(self, dvs): + # Improve test code coverage, change orchagent to display usage + dvs.runcmd("cp /usr/bin/orchagent.sh /usr/bin/orchagent.sh_usage_ut_backup") + dvs.runcmd("sed -i.bak 's/\/usr\/bin\/orchagent /\/usr\/bin\/orchagent -h /g' /usr/bin/orchagent.sh") + dvs.stop_swss() + dvs.start_swss() + + # wait orchagent start + time.sleep(3) + process_statue = dvs.runcmd("ps -ef") + zmq_logger.debug("Process status: {}".format(process_statue)) + + # revert change + dvs.runcmd("cp /usr/bin/orchagent.sh_usage_ut_backup /usr/bin/orchagent.sh") + dvs.stop_swss() + dvs.start_swss()