diff --git a/.azure-pipelines/gcov.yml b/.azure-pipelines/gcov.yml index 0bd769222d..3ac3408649 100644 --- a/.azure-pipelines/gcov.yml +++ b/.azure-pipelines/gcov.yml @@ -47,7 +47,7 @@ jobs: vmImage: 'ubuntu-20.04' variables: - DIFF_COVER_CHECK_THRESHOLD: 50 + DIFF_COVER_CHECK_THRESHOLD: 80 DIFF_COVER_ENABLE: 'true' container: diff --git a/.gitignore b/.gitignore index 13debec21a..04c3b514c7 100644 --- a/.gitignore +++ b/.gitignore @@ -74,6 +74,7 @@ swssconfig/swssplayer tlm_teamd/tlm_teamd teamsyncd/teamsyncd tests/tests +tests/mock_tests/tests_response_publisher tests/mock_tests/tests_fpmsyncd diff --git a/cfgmgr/buffermgrdyn.cpp b/cfgmgr/buffermgrdyn.cpp index 5b2d5a0d17..cd0ce72718 100644 --- a/cfgmgr/buffermgrdyn.cpp +++ b/cfgmgr/buffermgrdyn.cpp @@ -3141,8 +3141,6 @@ task_process_status BufferMgrDynamic::handleSingleBufferQueueEntry(const string return task_process_status::task_failed; } - // TODO: check overlap. Currently, assume there is no overlap - auto &portQueue = m_portQueueLookup[port][queues]; if (PORT_ADMIN_DOWN == portInfo.state) { diff --git a/cfgmgr/coppmgr.cpp b/cfgmgr/coppmgr.cpp index 1721cc8593..5595096a27 100644 --- a/cfgmgr/coppmgr.cpp +++ b/cfgmgr/coppmgr.cpp @@ -9,6 +9,8 @@ #include "shellcmd.h" #include "warm_restart.h" #include "json.hpp" +#include <unordered_map> +#include <unordered_set> using json = nlohmann::json; @@ -255,6 +257,42 @@ void CoppMgr::mergeConfig(CoppCfg &init_cfg, CoppCfg &m_cfg, std::vector<std::string> &cfg_keys, Table &cfgTable) +bool CoppMgr::isDupEntry(const std::string &key, std::vector<FieldValueTuple> &fvs) +{ + /* Compare with the existing contents of the copp tables: if, for a key K, the preserved fvs are the same + * as the fvs in trap_group_fvs, the entry is ignored as a duplicate and we continue to the next key. + * In case one of the fvs differs, the preserved entry will be deleted and a new entry will be set instead. + */ + std::vector<FieldValueTuple> preserved_fvs; + bool key_found = m_coppTable.get(key, preserved_fvs); + if (!key_found) + { + return false; + } + else + { + unordered_map<string, string> preserved_copp_entry; + for (auto prev_fv : preserved_fvs) + { + preserved_copp_entry[fvField(prev_fv)] = fvValue(prev_fv); + } + for (auto fv: fvs) + { + string field = fvField(fv); + string value = fvValue(fv); + auto preserved_copp_it = preserved_copp_entry.find(field); + bool field_found = (preserved_copp_it != preserved_copp_entry.end()); + if ((!field_found) || (field_found && preserved_copp_it->second.compare(value))) + { + // overwrite -> delete preserved entry from copp table and set a new entry instead + m_coppTable.del(key); + return false; + } + } + } + return true; +} + CoppMgr::CoppMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, const vector<string> &tableNames) : Orch(cfgDb, tableNames), m_cfgCoppTrapTable(cfgDb, CFG_COPP_TRAP_TABLE_NAME), @@ -270,9 +308,11 @@ CoppMgr::CoppMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, const vector<string> &tableNames) : std::vector<std::string> group_keys; std::vector<std::string> trap_keys; std::vector<std::string> feature_keys; + std::vector<std::string> preserved_copp_keys; std::vector<std::string> group_cfg_keys; std::vector<std::string> trap_cfg_keys; + unordered_set<std::string> supported_copp_keys; CoppCfg group_cfg; CoppCfg trap_cfg; @@ -280,6 +320,7 @@ CoppMgr::CoppMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, const vector<string> &tableNames) : m_cfgCoppGroupTable.getKeys(group_cfg_keys); m_cfgCoppTrapTable.getKeys(trap_cfg_keys); m_cfgFeatureTable.getKeys(feature_keys); + m_coppTable.getKeys(preserved_copp_keys); for (auto i: feature_keys) @@ -352,8 +393,14 @@ CoppMgr::CoppMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, const vector<string> &tableNames) : if (!trap_group_fvs.empty()) { + supported_copp_keys.emplace(i.first); + if (isDupEntry(i.first, trap_group_fvs)) + { + continue; + }
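+ // Not a duplicate: any stale preserved entry for this key has already been + // deleted inside isDupEntry(), so publish the freshly merged trap group entry.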
m_appCoppTable.set(i.first, trap_group_fvs); } + setCoppGroupStateOk(i.first); auto g_cfg = std::find(group_cfg_keys.begin(), group_cfg_keys.end(), i.first); if (g_cfg != group_cfg_keys.end()) @@ -361,6 +408,16 @@ CoppMgr::CoppMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, const vector<string> &tableNames) : g_copp_init_set.insert(i.first); } } + + // Delete unsupported keys from preserved copp tables + for (auto it : preserved_copp_keys) + { + auto copp_it = supported_copp_keys.find(it); + if (copp_it == supported_copp_keys.end()) + { + m_coppTable.del(it); + } + } } void CoppMgr::setCoppGroupStateOk(string alias) diff --git a/cfgmgr/coppmgr.h b/cfgmgr/coppmgr.h index 1d53756fce..44549d3bec 100644 --- a/cfgmgr/coppmgr.h +++ b/cfgmgr/coppmgr.h @@ -100,6 +100,7 @@ class CoppMgr : public Orch bool isTrapGroupInstalled(std::string key); bool isFeatureEnabled(std::string feature); void mergeConfig(CoppCfg &init_cfg, CoppCfg &m_cfg, std::vector<std::string> &cfg_keys, Table &cfgTable); + bool isDupEntry(const std::string &key, std::vector<FieldValueTuple> &fvs); void removeTrap(std::string key); void addTrap(std::string trap_ids, std::string trap_group); diff --git a/cfgmgr/intfmgrd.cpp b/cfgmgr/intfmgrd.cpp index 9ed3653333..d07cb9af78 100644 --- a/cfgmgr/intfmgrd.cpp +++ b/cfgmgr/intfmgrd.cpp @@ -62,8 +62,6 @@ int main(int argc, char **argv) WarmStart::checkWarmStart("intfmgrd", "swss"); IntfMgr intfmgr(&cfgDb, &appDb, &stateDb, cfg_intf_tables); - - // TODO: add tables in stateDB which interface depends on to monitor list std::vector<Orch *> cfgOrchList = {&intfmgr}; swss::Select s; diff --git a/cfgmgr/portmgrd.cpp b/cfgmgr/portmgrd.cpp index 180bbc1d63..944a881d06 100644 --- a/cfgmgr/portmgrd.cpp +++ b/cfgmgr/portmgrd.cpp @@ -53,8 +53,6 @@ int main(int argc, char **argv) DBConnector stateDb("STATE_DB", 0); PortMgr portmgr(&cfgDb, &appDb, &stateDb, cfg_port_tables); - - // TODO: add tables in stateDB which interface depends on to monitor list vector<Orch *> cfgOrchList = {&portmgr}; swss::Select s; diff --git a/cfgmgr/vrfmgrd.cpp b/cfgmgr/vrfmgrd.cpp index 735e59191d..c7ca49b6bc 100644 --- a/cfgmgr/vrfmgrd.cpp +++ b/cfgmgr/vrfmgrd.cpp @@ -64,7 +64,6 @@ int main(int argc, char **argv) isWarmStart = WarmStart::isWarmStart(); - // TODO: add tables in stateDB which interface depends on to monitor list std::vector<Orch *> cfgOrchList = {&vrfmgr}; swss::Select s; diff --git a/fpmsyncd/routesync.cpp b/fpmsyncd/routesync.cpp index ca00d4f77d..e87b9fa323 100644 --- a/fpmsyncd/routesync.cpp +++ b/fpmsyncd/routesync.cpp @@ -733,6 +733,32 @@ void RouteSync::onRouteMsg(int nlmsg_type, struct nl_object *obj, char *vrf) { SWSS_LOG_DEBUG("Skip routes to eth0 or docker0: %s %s %s", destipprefix, gw_list.c_str(), intf_list.c_str()); + // If intf_list has only this interface, that means all of the next hops of this route + // have been removed and the next hop on eth0/docker0 has become the only next hop. + // In this case, since we do not want the route with a next hop on eth0/docker0, we return. + // But still we need to clear the route from the APPL_DB.
Otherwise the APPL_DB and data + path will be left with a stale route entry + if(alsv.size() == 1) + { + if (!warmRestartInProgress) + { + SWSS_LOG_NOTICE("RouteTable del msg for route with only one nh on eth0/docker0: %s %s %s %s", + destipprefix, gw_list.c_str(), intf_list.c_str(), mpls_list.c_str()); + + m_routeTable.del(destipprefix); + } + else + { + SWSS_LOG_NOTICE("Warm-Restart mode: Receiving delete msg for route with only nh on eth0/docker0: %s %s %s %s", + destipprefix, gw_list.c_str(), intf_list.c_str(), mpls_list.c_str()); + + vector<FieldValueTuple> fvVector; + const KeyOpFieldsValuesTuple kfv = std::make_tuple(destipprefix, + DEL_COMMAND, + fvVector); + m_warmStartHelper.insertRefreshMap(kfv); + } + } return; } } diff --git a/neighsyncd/neighsync.cpp b/neighsyncd/neighsync.cpp index 8f208e73fe..46f51b9266 100644 --- a/neighsyncd/neighsync.cpp +++ b/neighsyncd/neighsync.cpp @@ -143,6 +143,12 @@ void NeighSync::onMsg(int nlmsg_type, struct nl_object *obj) nl_addr2str(rtnl_neigh_get_lladdr(neigh), macStr, MAX_ADDR_SIZE); } + if (!delete_key && !strncmp(macStr, "none", MAX_ADDR_SIZE)) + { + SWSS_LOG_NOTICE("Mac address is 'none' for ADD op, ignoring for %s", ipStr); + return; + } + /* Ignore neighbor entries with Broadcast Mac - Trigger for directed broadcast */ if (!delete_key && (MacAddress(macStr) == MacAddress("ff:ff:ff:ff:ff:ff"))) { diff --git a/orchagent/aclorch.cpp b/orchagent/aclorch.cpp index c57c7e8864..52bf6d35a5 100644 --- a/orchagent/aclorch.cpp +++ b/orchagent/aclorch.cpp @@ -70,7 +70,9 @@ acl_rule_attr_lookup_t aclMatchLookup = { MATCH_INNER_ETHER_TYPE, SAI_ACL_ENTRY_ATTR_FIELD_INNER_ETHER_TYPE }, { MATCH_INNER_IP_PROTOCOL, SAI_ACL_ENTRY_ATTR_FIELD_INNER_IP_PROTOCOL }, { MATCH_INNER_L4_SRC_PORT, SAI_ACL_ENTRY_ATTR_FIELD_INNER_L4_SRC_PORT }, - { MATCH_INNER_L4_DST_PORT, SAI_ACL_ENTRY_ATTR_FIELD_INNER_L4_DST_PORT } + { MATCH_INNER_L4_DST_PORT, SAI_ACL_ENTRY_ATTR_FIELD_INNER_L4_DST_PORT }, + { MATCH_BTH_OPCODE, SAI_ACL_ENTRY_ATTR_FIELD_BTH_OPCODE}, + { MATCH_AETH_SYNDROME, SAI_ACL_ENTRY_ATTR_FIELD_AETH_SYNDROME} }; static acl_range_type_lookup_t aclRangeTypeLookup = @@ -971,6 +973,36 @@ bool AclRule::validateAddMatch(string attr_name, string attr_value) matchData.data.u8 = to_uint<uint8_t>(attr_value); matchData.mask.u8 = 0xFF; } + else if (attr_name == MATCH_BTH_OPCODE) + { + auto opcode_data = tokenize(attr_value, '/'); + + if (opcode_data.size() == 2) + { + matchData.data.u8 = to_uint<uint8_t>(opcode_data[0]); + matchData.mask.u8 = to_uint<uint8_t>(opcode_data[1]); + } + else + { + SWSS_LOG_ERROR("Invalid BTH_OPCODE configuration: %s, expected format <data>/<mask>", attr_value.c_str()); + return false; + } + } + else if (attr_name == MATCH_AETH_SYNDROME) + { + auto syndrome_data = tokenize(attr_value, '/'); + + if (syndrome_data.size() == 2) + { + matchData.data.u8 = to_uint<uint8_t>(syndrome_data[0]); + matchData.mask.u8 = to_uint<uint8_t>(syndrome_data[1]); + } + else + { + SWSS_LOG_ERROR("Invalid AETH_SYNDROME configuration: %s, expected format <data>/<mask>", attr_value.c_str()); + return false; + } + } } catch (exception &e) { @@ -3800,7 +3832,7 @@ bool AclOrch::addAclTable(AclTable &newTable) } // Update matching field according to ACL stage newTable.addStageMandatoryMatchFields(); - + // Add mandatory ACL action if not present // We need to call addMandatoryActions here because addAclTable is directly called in other orchs.
// The action_list is already added if the ACL table creation is triggered by CONFIGDD, but calling addMandatoryActions diff --git a/orchagent/aclorch.h b/orchagent/aclorch.h index 4972ec1ac2..c62a68991a 100644 --- a/orchagent/aclorch.h +++ b/orchagent/aclorch.h @@ -49,6 +49,8 @@ #define MATCH_INNER_IP_PROTOCOL "INNER_IP_PROTOCOL" #define MATCH_INNER_L4_SRC_PORT "INNER_L4_SRC_PORT" #define MATCH_INNER_L4_DST_PORT "INNER_L4_DST_PORT" +#define MATCH_BTH_OPCODE "BTH_OPCODE" +#define MATCH_AETH_SYNDROME "AETH_SYNDROME" #define BIND_POINT_TYPE_PORT "PORT" #define BIND_POINT_TYPE_PORTCHANNEL "PORTCHANNEL" diff --git a/orchagent/bfdorch.cpp b/orchagent/bfdorch.cpp index e3cab2581a..f242645b35 100644 --- a/orchagent/bfdorch.cpp +++ b/orchagent/bfdorch.cpp @@ -13,7 +13,7 @@ using namespace swss; #define BFD_SESSION_DEFAULT_TX_INTERVAL 1000 #define BFD_SESSION_DEFAULT_RX_INTERVAL 1000 -#define BFD_SESSION_DEFAULT_DETECT_MULTIPLIER 3 +#define BFD_SESSION_DEFAULT_DETECT_MULTIPLIER 10 #define BFD_SESSION_MILLISECOND_TO_MICROSECOND 1000 #define BFD_SRCPORTINIT 49152 #define BFD_SRCPORTMAX 65536 @@ -306,9 +306,11 @@ bool BfdOrch::create_bfd_session(const string& key, const vectorhandleSaiCreateStatus(SAI_API_NEXT_HOP_GROUP, status); + task_process_status handle_status = handleSaiCreateStatus(SAI_API_NEXT_HOP_GROUP, status); if (handle_status != task_success) { - return gCbfNhgOrch->parseHandleSaiStatusFailure(handle_status); + return parseHandleSaiStatusFailure(handle_status); } } diff --git a/orchagent/crmorch.cpp b/orchagent/crmorch.cpp index f02bbd68f2..44be1e85b8 100644 --- a/orchagent/crmorch.cpp +++ b/orchagent/crmorch.cpp @@ -4,6 +4,7 @@ #include "crmorch.h" #include "converter.h" #include "timer.h" +#include "saihelper.h" #define CRM_POLLING_INTERVAL "polling_interval" #define CRM_COUNTERS_TABLE_KEY "STATS" diff --git a/orchagent/dash/dashorch.h b/orchagent/dash/dashorch.h index df2048e324..d0b456f0c2 100644 --- a/orchagent/dash/dashorch.h +++ b/orchagent/dash/dashorch.h @@ -14,6 +14,7 @@ #include "macaddress.h" #include "timer.h" #include "dashorch.h" +#include "saihelper.h" struct ApplianceEntry { diff --git a/orchagent/dash/dashvnetorch.cpp b/orchagent/dash/dashvnetorch.cpp index d293756f7a..e9fc357f26 100644 --- a/orchagent/dash/dashvnetorch.cpp +++ b/orchagent/dash/dashvnetorch.cpp @@ -17,6 +17,7 @@ #include "saiextensions.h" #include "swssnet.h" #include "tokenize.h" +#include "dashorch.h" using namespace std; using namespace swss; diff --git a/orchagent/fabricportsorch.cpp b/orchagent/fabricportsorch.cpp index 54d815aaaa..5fabdfc8f5 100644 --- a/orchagent/fabricportsorch.cpp +++ b/orchagent/fabricportsorch.cpp @@ -9,6 +9,7 @@ #include "schema.h" #include "sai_serialize.h" #include "timer.h" +#include "saihelper.h" #define FABRIC_POLLING_INTERVAL_DEFAULT (30) #define FABRIC_PORT_PREFIX "PORT" diff --git a/orchagent/main.cpp b/orchagent/main.cpp index 7c953f512b..0d39b891a6 100644 --- a/orchagent/main.cpp +++ b/orchagent/main.cpp @@ -126,7 +126,7 @@ void syncd_apply_view() if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to notify syncd APPLY_VIEW %d", status); - exit(EXIT_FAILURE); + handleSaiFailure(true); } } @@ -170,9 +170,17 @@ void getCfgSwitchType(DBConnector *cfgDb, string &switch_type) { Table cfgDeviceMetaDataTable(cfgDb, CFG_DEVICE_METADATA_TABLE_NAME); - if (!cfgDeviceMetaDataTable.hget("localhost", "switch_type", switch_type)) + try + { + if (!cfgDeviceMetaDataTable.hget("localhost", "switch_type", switch_type)) + { + //Switch type is not configured. 
Consider it default = "switch" (regular switch) + switch_type = "switch"; + } + } + catch(const std::system_error& e) { - //Switch type is not configured. Consider it default = "switch" (regular switch) + SWSS_LOG_ERROR("System error: %s", e.what()); switch_type = "switch"; } @@ -196,64 +204,72 @@ bool getSystemPortConfigList(DBConnector *cfgDb, DBConnector *appDb, vectorsetFabricEnabled(true); - orchDaemon->setFabricPortStatEnabled(true); - orchDaemon->setFabricQueueStatEnabled(true); + // SAI doesn't fully support counters for non fabric asics + orchDaemon->setFabricPortStatEnabled(false); + orchDaemon->setFabricQueueStatEnabled(false); } } else diff --git a/orchagent/muxorch.cpp b/orchagent/muxorch.cpp index c607c9390f..3fffd18946 100644 --- a/orchagent/muxorch.cpp +++ b/orchagent/muxorch.cpp @@ -567,6 +567,8 @@ void MuxCable::updateNeighbor(NextHopKey nh, bool add) void MuxNbrHandler::update(NextHopKey nh, sai_object_id_t tunnelId, bool add, MuxState state) { + uint32_t num_routes = 0; + SWSS_LOG_INFO("Neigh %s on %s, add %d, state %d", nh.ip_address.to_string().c_str(), nh.alias.c_str(), add, state); @@ -592,6 +594,7 @@ void MuxNbrHandler::update(NextHopKey nh, sai_object_id_t tunnelId, bool add, Mu case MuxState::MUX_STATE_ACTIVE: neighbors_[nh.ip_address] = gNeighOrch->getLocalNextHopId(nh); gNeighOrch->enableNeighbor(nh); + gRouteOrch->updateNextHopRoutes(nh, num_routes); break; case MuxState::MUX_STATE_STANDBY: neighbors_[nh.ip_address] = tunnelId; diff --git a/orchagent/neighorch.cpp b/orchagent/neighorch.cpp index 562793b5f7..5207b2430c 100644 --- a/orchagent/neighorch.cpp +++ b/orchagent/neighorch.cpp @@ -591,6 +591,14 @@ void NeighOrch::decreaseNextHopRefCount(const NextHopKey &nexthop, uint32_t coun assert(hasNextHop(nexthop)); if (m_syncdNextHops.find(nexthop) != m_syncdNextHops.end()) { + if ((m_syncdNextHops[nexthop].ref_count - (int)count) < 0) + { + SWSS_LOG_ERROR("Ref count cannot be negative for next_hop_id: 0x%" PRIx64 " with ip: %s and alias: %s", + m_syncdNextHops[nexthop].next_hop_id, nexthop.ip_address.to_string().c_str(), nexthop.alias.c_str()); + // Reset refcount to 0 to match expected value + m_syncdNextHops[nexthop].ref_count = 0; + return; + } m_syncdNextHops[nexthop].ref_count -= count; } } @@ -912,15 +920,18 @@ bool NeighOrch::addNeighbor(const NeighborEntry &neighborEntry, const MacAddress } else if (isHwConfigured(neighborEntry)) { - status = sai_neighbor_api->set_neighbor_entry_attribute(&neighbor_entry, &neighbor_attr); - if (status != SAI_STATUS_SUCCESS) + for (auto itr : neighbor_attrs) { - SWSS_LOG_ERROR("Failed to update neighbor %s on %s, rv:%d", - macAddress.to_string().c_str(), alias.c_str(), status); - task_process_status handle_status = handleSaiSetStatus(SAI_API_NEIGHBOR, status); - if (handle_status != task_success) + status = sai_neighbor_api->set_neighbor_entry_attribute(&neighbor_entry, &itr); + if (status != SAI_STATUS_SUCCESS) { - return parseHandleSaiStatusFailure(handle_status); + SWSS_LOG_ERROR("Failed to update neighbor %s on %s, attr.id=0x%x, rv:%d", + macAddress.to_string().c_str(), alias.c_str(), itr.id, status); + task_process_status handle_status = handleSaiSetStatus(SAI_API_NEIGHBOR, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } } } SWSS_LOG_NOTICE("Updated neighbor %s on %s", macAddress.to_string().c_str(), alias.c_str()); diff --git a/orchagent/nhgbase.h b/orchagent/nhgbase.h index 65f0690555..1dbf2f7762 100644 --- a/orchagent/nhgbase.h +++ b/orchagent/nhgbase.h @@ 
-451,11 +451,6 @@ class NhgOrchCommon : public Orch } inline void decSyncedNhgCount() { NhgBase::decSyncedCount(); } - /* Handling SAI status*/ - using Orch::handleSaiCreateStatus; - using Orch::handleSaiRemoveStatus; - using Orch::parseHandleSaiStatusFailure; - protected: /* * Map of synced next hop groups. diff --git a/orchagent/nhgorch.cpp b/orchagent/nhgorch.cpp index 32ddb27eb5..cefc2efbb1 100644 --- a/orchagent/nhgorch.cpp +++ b/orchagent/nhgorch.cpp @@ -576,10 +576,10 @@ bool NextHopGroup::sync() SWSS_LOG_ERROR("Failed to create next hop group %s, rv:%d", m_key.to_string().c_str(), status); - task_process_status handle_status = gNhgOrch->handleSaiCreateStatus(SAI_API_NEXT_HOP_GROUP, status); + task_process_status handle_status = handleSaiCreateStatus(SAI_API_NEXT_HOP_GROUP, status); if (handle_status != task_success) { - return gNhgOrch->parseHandleSaiStatusFailure(handle_status); + return parseHandleSaiStatusFailure(handle_status); } } diff --git a/orchagent/notifications.cpp b/orchagent/notifications.cpp index 1a49526370..297b8d4850 100644 --- a/orchagent/notifications.cpp +++ b/orchagent/notifications.cpp @@ -23,7 +23,7 @@ void on_bfd_session_state_change(uint32_t count, sai_bfd_session_state_notificat // which causes concurrency access to the DB } -void on_switch_shutdown_request() +void on_switch_shutdown_request(sai_object_id_t switch_id) { SWSS_LOG_ENTER(); diff --git a/orchagent/notifications.h b/orchagent/notifications.h index ea22593a1f..61e8422db0 100644 --- a/orchagent/notifications.h +++ b/orchagent/notifications.h @@ -7,4 +7,7 @@ extern "C" { void on_fdb_event(uint32_t count, sai_fdb_event_notification_data_t *data); void on_port_state_change(uint32_t count, sai_port_oper_status_notification_t *data); void on_bfd_session_state_change(uint32_t count, sai_bfd_session_state_notification_t *data); -void on_switch_shutdown_request(); + +// The function prototype information can be found here: +// https://github.com/sonic-net/sonic-sairedis/blob/master/meta/NotificationSwitchShutdownRequest.cpp#L49 +void on_switch_shutdown_request(sai_object_id_t switch_id); diff --git a/orchagent/orch.cpp b/orchagent/orch.cpp index 8a4049583a..5690d85dd4 100644 --- a/orchagent/orch.cpp +++ b/orchagent/orch.cpp @@ -562,6 +562,11 @@ void Orch::dumpPendingTasks(vector &ts) } } +void Orch::flushResponses() +{ + m_publisher.flush(); +} + void Orch::logfileReopen() { gRecordOfs.close(); @@ -856,193 +861,6 @@ Executor *Orch::getExecutor(string executorName) return NULL; } -task_process_status Orch::handleSaiCreateStatus(sai_api_t api, sai_status_t status, void *context) -{ - /* - * This function aims to provide coarse handling of failures in sairedis create - * operation (i.e., notify users by throwing excepions when failures happen). - * Return value: task_success - Handled the status successfully. No need to retry this SAI operation. - * task_need_retry - Cannot handle the status. Need to retry the SAI operation. - * task_failed - Failed to handle the status but another attempt is unlikely to resolve the failure. - * TODO: 1. Add general handling logic for specific statuses (e.g., SAI_STATUS_ITEM_ALREADY_EXISTS) - * 2. Develop fine-grain failure handling mechanisms and replace this coarse handling - * in each orch. - * 3. Take the type of sai api into consideration. 
- */ - switch (api) - { - case SAI_API_FDB: - switch (status) - { - case SAI_STATUS_SUCCESS: - SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiCreateStatus"); - return task_success; - case SAI_STATUS_ITEM_ALREADY_EXISTS: - /* - * In FDB creation, there are scenarios where the hardware learns an FDB entry before orchagent. - * In such cases, the FDB SAI creation would report the status of SAI_STATUS_ITEM_ALREADY_EXISTS, - * and orchagent should ignore the error and treat it as entry was explicitly created. - */ - return task_success; - default: - SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - abort(); - } - break; - case SAI_API_HOSTIF: - switch (status) - { - case SAI_STATUS_SUCCESS: - return task_success; - case SAI_STATUS_FAILURE: - /* - * Host interface maybe failed due to lane not available. - * In some scenarios, like SONiC virtual machine, the invalid lane may be not enabled by VM configuration, - * So just ignore the failure and report an error log. - */ - return task_ignore; - default: - SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - abort(); - } - default: - switch (status) - { - case SAI_STATUS_SUCCESS: - SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiCreateStatus"); - return task_success; - default: - SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - abort(); - } - } - return task_need_retry; -} - -task_process_status Orch::handleSaiSetStatus(sai_api_t api, sai_status_t status, void *context) -{ - /* - * This function aims to provide coarse handling of failures in sairedis set - * operation (i.e., notify users by throwing excepions when failures happen). - * Return value: task_success - Handled the status successfully. No need to retry this SAI operation. - * task_need_retry - Cannot handle the status. Need to retry the SAI operation. - * task_failed - Failed to handle the status but another attempt is unlikely to resolve the failure. - * TODO: 1. Add general handling logic for specific statuses - * 2. Develop fine-grain failure handling mechanisms and replace this coarse handling - * in each orch. - * 3. Take the type of sai api into consideration. - */ - if (status == SAI_STATUS_SUCCESS) - { - SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiSetStatus"); - return task_success; - } - - switch (api) - { - case SAI_API_PORT: - switch (status) - { - case SAI_STATUS_INVALID_ATTR_VALUE_0: - /* - * If user gives an invalid attribute value, no need to retry or exit orchagent, just fail the current task - * and let user correct the configuration. 
- */ - SWSS_LOG_ERROR("Encountered SAI_STATUS_INVALID_ATTR_VALUE_0 in set operation, task failed, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - return task_failed; - default: - SWSS_LOG_ERROR("Encountered failure in set operation, exiting orchagent, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - abort(); - } - default: - SWSS_LOG_ERROR("Encountered failure in set operation, exiting orchagent, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - abort(); - } - - return task_need_retry; -} - -task_process_status Orch::handleSaiRemoveStatus(sai_api_t api, sai_status_t status, void *context) -{ - /* - * This function aims to provide coarse handling of failures in sairedis remove - * operation (i.e., notify users by throwing excepions when failures happen). - * Return value: task_success - Handled the status successfully. No need to retry this SAI operation. - * task_need_retry - Cannot handle the status. Need to retry the SAI operation. - * task_failed - Failed to handle the status but another attempt is unlikely to resolve the failure. - * TODO: 1. Add general handling logic for specific statuses (e.g., SAI_STATUS_OBJECT_IN_USE, - * SAI_STATUS_ITEM_NOT_FOUND) - * 2. Develop fine-grain failure handling mechanisms and replace this coarse handling - * in each orch. - * 3. Take the type of sai api into consideration. - */ - switch (status) - { - case SAI_STATUS_SUCCESS: - SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiRemoveStatus"); - return task_success; - default: - SWSS_LOG_ERROR("Encountered failure in remove operation, exiting orchagent, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - abort(); - } - return task_need_retry; -} - -task_process_status Orch::handleSaiGetStatus(sai_api_t api, sai_status_t status, void *context) -{ - /* - * This function aims to provide coarse handling of failures in sairedis get - * operation (i.e., notify users by throwing excepions when failures happen). - * Return value: task_success - Handled the status successfully. No need to retry this SAI operation. - * task_need_retry - Cannot handle the status. Need to retry the SAI operation. - * task_failed - Failed to handle the status but another attempt is unlikely to resolve the failure. - * TODO: 1. Add general handling logic for specific statuses - * 2. Develop fine-grain failure handling mechanisms and replace this coarse handling - * in each orch. - * 3. Take the type of sai api into consideration. - */ - switch (status) - { - case SAI_STATUS_SUCCESS: - SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiGetStatus"); - return task_success; - case SAI_STATUS_NOT_IMPLEMENTED: - SWSS_LOG_ERROR("Encountered failure in get operation due to the function is not implemented, exiting orchagent, SAI API: %s", - sai_serialize_api(api).c_str()); - throw std::logic_error("SAI get function not implemented"); - default: - SWSS_LOG_ERROR("Encountered failure in get operation, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - } - return task_failed; -} - -bool Orch::parseHandleSaiStatusFailure(task_process_status status) -{ - /* - * This function parses task process status from SAI failure handling function to whether a retry is needed. - * Return value: true - no retry is needed. - * false - retry is needed. 
- */ - switch (status) - { - case task_need_retry: - return false; - case task_failed: - return true; - default: - SWSS_LOG_WARN("task_process_status %d is not expected in parseHandleSaiStatusFailure", status); - } - return true; -} - void Orch2::doTask(Consumer &consumer) { SWSS_LOG_ENTER(); diff --git a/orchagent/orch.h b/orchagent/orch.h index 7e803bbd93..efee98a73c 100644 --- a/orchagent/orch.h +++ b/orchagent/orch.h @@ -223,6 +223,11 @@ class Orch static void recordTuple(Consumer &consumer, const swss::KeyOpFieldsValuesTuple &tuple); void dumpPendingTasks(std::vector &ts); + + /** + * @brief Flush pending responses + */ + void flushResponses(); protected: ConsumerMap m_consumerMap; @@ -246,13 +251,6 @@ class Orch void addExecutor(Executor* executor); Executor *getExecutor(std::string executorName); - /* Handling SAI status*/ - virtual task_process_status handleSaiCreateStatus(sai_api_t api, sai_status_t status, void *context = nullptr); - virtual task_process_status handleSaiSetStatus(sai_api_t api, sai_status_t status, void *context = nullptr); - virtual task_process_status handleSaiRemoveStatus(sai_api_t api, sai_status_t status, void *context = nullptr); - virtual task_process_status handleSaiGetStatus(sai_api_t api, sai_status_t status, void *context = nullptr); - bool parseHandleSaiStatusFailure(task_process_status status); - ResponsePublisher m_publisher; private: void addConsumer(swss::DBConnector *db, std::string tableName, int pri = default_orch_pri); diff --git a/orchagent/orchdaemon.cpp b/orchagent/orchdaemon.cpp index 8a772de77e..fc9e8d2545 100644 --- a/orchagent/orchdaemon.cpp +++ b/orchagent/orchdaemon.cpp @@ -714,7 +714,12 @@ void OrchDaemon::flush() if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to flush redis pipeline %d", status); - abort(); + handleSaiFailure(true); + } + + for (auto* orch: m_orchList) + { + orch->flushResponses(); } } diff --git a/orchagent/p4orch/tests/test_main.cpp b/orchagent/p4orch/tests/test_main.cpp index 91b4296a29..08b72a9377 100644 --- a/orchagent/p4orch/tests/test_main.cpp +++ b/orchagent/p4orch/tests/test_main.cpp @@ -80,6 +80,32 @@ sai_my_mac_api_t *sai_my_mac_api; sai_counter_api_t *sai_counter_api; sai_generic_programmable_api_t *sai_generic_programmable_api; +task_process_status handleSaiCreateStatus(sai_api_t api, sai_status_t status, void *context) +{ + return task_success; +} + +task_process_status handleSaiSetStatus(sai_api_t api, sai_status_t status, void *context) +{ + return task_success; +} + +task_process_status handleSaiRemoveStatus(sai_api_t api, sai_status_t status, void *context) +{ + return task_success; +} + +task_process_status handleSaiGetStatus(sai_api_t api, sai_status_t status, void *context) +{ + return task_success; +} + +bool parseHandleSaiStatusFailure(task_process_status status) +{ + return true; +} + + namespace { diff --git a/orchagent/port.h b/orchagent/port.h index a5e003584b..686b82d757 100644 --- a/orchagent/port.h +++ b/orchagent/port.h @@ -113,7 +113,7 @@ class Port } std::string m_alias; - Type m_type; + Type m_type = UNKNOWN; int m_index = 0; // PHY_PORT: index uint32_t m_mtu = DEFAULT_MTU; uint32_t m_speed = 0; // Mbps diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index 85369b9d1a..2a5cb3aabb 100755 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -2702,6 +2702,13 @@ bool PortsOrch::addPort(const set &lane_set, uint32_t speed, int an, string m_portListLaneMap[lane_set] = port_id; m_portCount++; + // newly created ports might be put in the default vlan so 
remove all ports from + // the default vlan. + if (gMySwitchType == "voq") { + removeDefaultVlanMembers(); + removeDefaultBridgePorts(); + } + SWSS_LOG_NOTICE("Create port %" PRIx64 " with the speed %u", port_id, speed); return true; @@ -2719,7 +2726,11 @@ sai_status_t PortsOrch::removePort(sai_object_id_t port_id) */ if (getPort(port_id, port)) { - setPortAdminStatus(port, false); + /* Bring port down before removing port */ + if (!setPortAdminStatus(port, false)) + { + SWSS_LOG_ERROR("Failed to set admin status to DOWN to remove port %" PRIx64, port_id); + } } /* else : port is in default state or not yet created */ @@ -3590,9 +3601,9 @@ void PortsOrch::doPortTask(Consumer &consumer) continue; } - if (adv_interface_types != p.m_adv_interface_types && p.m_autoneg == 1) + if (adv_interface_types != p.m_adv_interface_types) { - if (p.m_admin_state_up) + if (p.m_admin_state_up && p.m_autoneg == 1) { /* Bring port down before applying speed */ if (!setPortAdminStatus(p, false)) @@ -4463,7 +4474,7 @@ void PortsOrch::doTask() APP_LAG_TABLE_NAME, APP_LAG_MEMBER_TABLE_NAME, APP_VLAN_TABLE_NAME, - APP_VLAN_MEMBER_TABLE_NAME, + APP_VLAN_MEMBER_TABLE_NAME }; for (auto tableName: tableOrder) diff --git a/orchagent/qosorch.cpp b/orchagent/qosorch.cpp index 2f8378d284..515d591e00 100644 --- a/orchagent/qosorch.cpp +++ b/orchagent/qosorch.cpp @@ -1338,11 +1338,6 @@ task_process_status QosOrch::handleSchedulerTable(Consumer& consumer, KeyOpField attr.value.u8 = (uint8_t)stoi(fvValue(*i)); sai_attr_list.push_back(attr); } - else if (fvField(*i) == scheduler_priority_field_name) - { - // TODO: The meaning is to be able to adjust priority of the given scheduler group. - // However currently SAI model does not provide such ability. - } else if (fvField(*i) == scheduler_meter_type_field_name) { sai_meter_type_t meter_value = scheduler_meter_map.at(fvValue(*i)); diff --git a/orchagent/qosorch.h b/orchagent/qosorch.h index b5e2e1ad86..f677e68a01 100644 --- a/orchagent/qosorch.h +++ b/orchagent/qosorch.h @@ -44,7 +44,6 @@ const string scheduler_algo_DWRR = "DWRR"; const string scheduler_algo_WRR = "WRR"; const string scheduler_algo_STRICT = "STRICT"; const string scheduler_weight_field_name = "weight"; -const string scheduler_priority_field_name = "priority"; const string scheduler_meter_type_field_name = "meter_type"; const string scheduler_min_bandwidth_rate_field_name = "cir";//Committed Information Rate const string scheduler_min_bandwidth_burst_rate_field_name = "cbs";//Committed Burst Size diff --git a/orchagent/response_publisher.cpp b/orchagent/response_publisher.cpp index 5d0490167c..169075faa4 100644 --- a/orchagent/response_publisher.cpp +++ b/orchagent/response_publisher.cpp @@ -90,7 +90,10 @@ void RecordResponse(const std::string &response_channel, const std::string &key, } // namespace -ResponsePublisher::ResponsePublisher() : m_db("APPL_STATE_DB", 0) +ResponsePublisher::ResponsePublisher(bool buffered) : + m_db(std::make_unique("APPL_STATE_DB", 0)), + m_pipe(std::make_unique(m_db.get())), + m_buffered(buffered) { } @@ -107,17 +110,14 @@ void ResponsePublisher::publish(const std::string &table, const std::string &key } std::string response_channel = "APPL_DB_" + table + "_RESPONSE_CHANNEL"; - if (m_notifiers.find(table) == m_notifiers.end()) - { - m_notifiers[table] = std::make_unique(&m_db, response_channel); - } + swss::NotificationProducer notificationProducer{m_pipe.get(), response_channel, m_buffered}; auto intent_attrs_copy = intent_attrs; // Add error message as the first field-value-pair. 
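+ // Note: the NotificationProducer above is constructed per publish() call on the shared + // RedisPipeline (m_pipe); with m_buffered set, send() is queued and only reaches Redis + // when ResponsePublisher::flush() drains the pipeline.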
swss::FieldValueTuple err_str("err_str", PrependedComponent(status) + status.message()); intent_attrs_copy.insert(intent_attrs_copy.begin(), err_str); // Sends the response to the notification channel. - m_notifiers[table]->send(status.codeStr(), key, intent_attrs_copy); + notificationProducer.send(status.codeStr(), key, intent_attrs_copy); RecordResponse(response_channel, key, intent_attrs_copy, status.codeStr()); } @@ -140,17 +140,14 @@ void ResponsePublisher::publish(const std::string &table, const std::string &key void ResponsePublisher::writeToDB(const std::string &table, const std::string &key, const std::vector &values, const std::string &op, bool replace) { - if (m_tables.find(table) == m_tables.end()) - { - m_tables[table] = std::make_unique(&m_db, table); - } + swss::Table applStateTable{m_pipe.get(), table, m_buffered}; auto attrs = values; if (op == SET_COMMAND) { if (replace) { - m_tables[table]->del(key); + applStateTable.del(key); } if (!values.size()) { @@ -160,9 +157,9 @@ void ResponsePublisher::writeToDB(const std::string &table, const std::string &k // Write to DB only if the key does not exist or non-NULL attributes are // being written to the entry. std::vector fv; - if (!m_tables[table]->get(key, fv)) + if (!applStateTable.get(key, fv)) { - m_tables[table]->set(key, attrs); + applStateTable.set(key, attrs); RecordDBWrite(table, key, attrs, op); return; } @@ -179,13 +176,23 @@ void ResponsePublisher::writeToDB(const std::string &table, const std::string &k } if (attrs.size()) { - m_tables[table]->set(key, attrs); + applStateTable.set(key, attrs); RecordDBWrite(table, key, attrs, op); } } else if (op == DEL_COMMAND) { - m_tables[table]->del(key); + applStateTable.del(key); RecordDBWrite(table, key, {}, op); } } + +void ResponsePublisher::flush() +{ + m_pipe->flush(); +} + +void ResponsePublisher::setBuffered(bool buffered) +{ + m_buffered = buffered; +} diff --git a/orchagent/response_publisher.h b/orchagent/response_publisher.h index cd688112e8..db882d9c70 100644 --- a/orchagent/response_publisher.h +++ b/orchagent/response_publisher.h @@ -16,7 +16,8 @@ class ResponsePublisher : public ResponsePublisherInterface { public: - explicit ResponsePublisher(); + explicit ResponsePublisher(bool buffered = false); + virtual ~ResponsePublisher() = default; // Intent attributes are the attributes sent in the notification into the @@ -42,10 +43,21 @@ class ResponsePublisher : public ResponsePublisherInterface void writeToDB(const std::string &table, const std::string &key, const std::vector &values, const std::string &op, bool replace = false) override; + /** + * @brief Flush pending responses + */ + void flush(); + + /** + * @brief Set buffering mode + * + * @param buffered Flag whether responses are buffered + */ + void setBuffered(bool buffered); + private: - swss::DBConnector m_db; - // Maps table names to tables. - std::unordered_map> m_tables; - // Maps table names to notifiers. 
- std::unordered_map> m_notifiers; + std::unique_ptr m_db; + std::unique_ptr m_pipe; + + bool m_buffered{false}; }; diff --git a/orchagent/saihelper.cpp b/orchagent/saihelper.cpp index 6184c7fb36..d01f1ddf95 100644 --- a/orchagent/saihelper.cpp +++ b/orchagent/saihelper.cpp @@ -483,3 +483,231 @@ sai_status_t initSaiPhyApi(swss::gearbox_phy_t *phy) return status; } +task_process_status handleSaiCreateStatus(sai_api_t api, sai_status_t status, void *context) +{ + /* + * This function aims to provide coarse handling of failures in sairedis create + * operation (i.e., notify users by throwing excepions when failures happen). + * Return value: task_success - Handled the status successfully. No need to retry this SAI operation. + * task_need_retry - Cannot handle the status. Need to retry the SAI operation. + * task_failed - Failed to handle the status but another attempt is unlikely to resolve the failure. + * TODO: 1. Add general handling logic for specific statuses (e.g., SAI_STATUS_ITEM_ALREADY_EXISTS) + * 2. Develop fine-grain failure handling mechanisms and replace this coarse handling + * in each orch. + * 3. Take the type of sai api into consideration. + */ + switch (api) + { + case SAI_API_FDB: + switch (status) + { + case SAI_STATUS_SUCCESS: + SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiCreateStatus"); + return task_success; + case SAI_STATUS_ITEM_ALREADY_EXISTS: + /* + * In FDB creation, there are scenarios where the hardware learns an FDB entry before orchagent. + * In such cases, the FDB SAI creation would report the status of SAI_STATUS_ITEM_ALREADY_EXISTS, + * and orchagent should ignore the error and treat it as entry was explicitly created. + */ + return task_success; + default: + SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + handleSaiFailure(true); + break; + } + break; + case SAI_API_HOSTIF: + switch (status) + { + case SAI_STATUS_SUCCESS: + return task_success; + case SAI_STATUS_FAILURE: + /* + * Host interface maybe failed due to lane not available. + * In some scenarios, like SONiC virtual machine, the invalid lane may be not enabled by VM configuration, + * So just ignore the failure and report an error log. + */ + return task_ignore; + default: + SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + handleSaiFailure(true); + break; + } + break; + default: + switch (status) + { + case SAI_STATUS_SUCCESS: + SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiCreateStatus"); + return task_success; + default: + SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + handleSaiFailure(true); + break; + } + } + return task_need_retry; +} + +task_process_status handleSaiSetStatus(sai_api_t api, sai_status_t status, void *context) +{ + /* + * This function aims to provide coarse handling of failures in sairedis set + * operation (i.e., notify users by throwing excepions when failures happen). + * Return value: task_success - Handled the status successfully. No need to retry this SAI operation. + * task_need_retry - Cannot handle the status. Need to retry the SAI operation. + * task_failed - Failed to handle the status but another attempt is unlikely to resolve the failure. + * TODO: 1. 
Add general handling logic for specific statuses + * 2. Develop fine-grain failure handling mechanisms and replace this coarse handling + * in each orch. + * 3. Take the type of sai api into consideration. + */ + if (status == SAI_STATUS_SUCCESS) + { + SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiSetStatus"); + return task_success; + } + + switch (api) + { + case SAI_API_PORT: + switch (status) + { + case SAI_STATUS_INVALID_ATTR_VALUE_0: + /* + * If user gives an invalid attribute value, no need to retry or exit orchagent, just fail the current task + * and let user correct the configuration. + */ + SWSS_LOG_ERROR("Encountered SAI_STATUS_INVALID_ATTR_VALUE_0 in set operation, task failed, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + return task_failed; + default: + SWSS_LOG_ERROR("Encountered failure in set operation, exiting orchagent, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + handleSaiFailure(true); + break; + } + break; + case SAI_API_TUNNEL: + switch (status) + { + case SAI_STATUS_ATTR_NOT_SUPPORTED_0: + SWSS_LOG_ERROR("Encountered SAI_STATUS_ATTR_NOT_SUPPORTED_0 in set operation, task failed, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + return task_failed; + default: + SWSS_LOG_ERROR("Encountered failure in set operation, exiting orchagent, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + handleSaiFailure(true); + break; + } + break; + default: + SWSS_LOG_ERROR("Encountered failure in set operation, exiting orchagent, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + handleSaiFailure(true); + break; + } + + return task_need_retry; +} + +task_process_status handleSaiRemoveStatus(sai_api_t api, sai_status_t status, void *context) +{ + /* + * This function aims to provide coarse handling of failures in sairedis remove + * operation (i.e., notify users by throwing excepions when failures happen). + * Return value: task_success - Handled the status successfully. No need to retry this SAI operation. + * task_need_retry - Cannot handle the status. Need to retry the SAI operation. + * task_failed - Failed to handle the status but another attempt is unlikely to resolve the failure. + * TODO: 1. Add general handling logic for specific statuses (e.g., SAI_STATUS_OBJECT_IN_USE, + * SAI_STATUS_ITEM_NOT_FOUND) + * 2. Develop fine-grain failure handling mechanisms and replace this coarse handling + * in each orch. + * 3. Take the type of sai api into consideration. + */ + switch (status) + { + case SAI_STATUS_SUCCESS: + SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiRemoveStatus"); + return task_success; + default: + SWSS_LOG_ERROR("Encountered failure in remove operation, exiting orchagent, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + handleSaiFailure(true); + break; + } + return task_need_retry; +} + +task_process_status handleSaiGetStatus(sai_api_t api, sai_status_t status, void *context) +{ + /* + * This function aims to provide coarse handling of failures in sairedis get + * operation (i.e., notify users by throwing excepions when failures happen). + * Return value: task_success - Handled the status successfully. No need to retry this SAI operation. + * task_need_retry - Cannot handle the status. Need to retry the SAI operation. 
+ * task_failed - Failed to handle the status but another attempt is unlikely to resolve the failure. + * TODO: 1. Add general handling logic for specific statuses + * 2. Develop fine-grain failure handling mechanisms and replace this coarse handling + * in each orch. + * 3. Take the type of sai api into consideration. + */ + switch (status) + { + case SAI_STATUS_SUCCESS: + SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiGetStatus"); + return task_success; + case SAI_STATUS_NOT_IMPLEMENTED: + SWSS_LOG_ERROR("Encountered failure in get operation due to the function is not implemented, exiting orchagent, SAI API: %s", + sai_serialize_api(api).c_str()); + throw std::logic_error("SAI get function not implemented"); + default: + SWSS_LOG_ERROR("Encountered failure in get operation, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + } + return task_failed; +} + +bool parseHandleSaiStatusFailure(task_process_status status) +{ + /* + * This function parses task process status from SAI failure handling function to whether a retry is needed. + * Return value: true - no retry is needed. + * false - retry is needed. + */ + switch (status) + { + case task_need_retry: + return false; + case task_failed: + return true; + default: + SWSS_LOG_WARN("task_process_status %d is not expected in parseHandleSaiStatusFailure", status); + } + return true; +} + +/* Handling SAI failure. Request redis to invoke SAI failure dump and abort if set*/ +void handleSaiFailure(bool abort_on_failure) +{ + SWSS_LOG_ENTER(); + + sai_attribute_t attr; + + attr.id = SAI_REDIS_SWITCH_ATTR_NOTIFY_SYNCD; + attr.value.s32 = SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP; + sai_status_t status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to take sai failure dump %d", status); + } + if (abort_on_failure) + { + abort(); + } +} diff --git a/orchagent/saihelper.h b/orchagent/saihelper.h index a0b2aa2fac..c7ff8d23ea 100644 --- a/orchagent/saihelper.h +++ b/orchagent/saihelper.h @@ -3,6 +3,7 @@ #include "gearboxutils.h" #include +#include "orch.h" #define IS_ATTR_ID_IN_RANGE(attrId, objectType, attrPrefix) \ ((attrId) >= SAI_ ## objectType ## _ATTR_ ## attrPrefix ## _START && (attrId) <= SAI_ ## objectType ## _ATTR_ ## attrPrefix ## _END) @@ -10,3 +11,11 @@ void initSaiApi(); void initSaiRedis(const std::string &record_location, const std::string &record_filename); sai_status_t initSaiPhyApi(swss::gearbox_phy_t *phy); + +/* Handling SAI status*/ +task_process_status handleSaiCreateStatus(sai_api_t api, sai_status_t status, void *context = nullptr); +task_process_status handleSaiSetStatus(sai_api_t api, sai_status_t status, void *context = nullptr); +task_process_status handleSaiRemoveStatus(sai_api_t api, sai_status_t status, void *context = nullptr); +task_process_status handleSaiGetStatus(sai_api_t api, sai_status_t status, void *context = nullptr); +bool parseHandleSaiStatusFailure(task_process_status status); +void handleSaiFailure(bool abort_on_failure); diff --git a/orchagent/switchorch.cpp b/orchagent/switchorch.cpp index 951358774f..b5aed63adc 100644 --- a/orchagent/switchorch.cpp +++ b/orchagent/switchorch.cpp @@ -9,6 +9,7 @@ #include "notificationproducer.h" #include "macaddress.h" #include "return_code.h" +#include "saihelper.h" using namespace std; using namespace swss; diff --git a/orchagent/tunneldecaporch.cpp b/orchagent/tunneldecaporch.cpp index e84ba315c4..065e78a0c0 100644 --- 
a/orchagent/tunneldecaporch.cpp +++ b/orchagent/tunneldecaporch.cpp @@ -144,7 +144,9 @@ void TunnelDecapOrch::doTask(Consumer& consumer) } if (exists) { - setTunnelAttribute(fvField(i), ecn_mode, tunnel_id); + SWSS_LOG_NOTICE("Skip setting ecn_mode since the SAI attribute SAI_TUNNEL_ATTR_DECAP_ECN_MODE is create only"); + valid = false; + break; } } else if (fvField(i) == "encap_ecn_mode") { @@ -158,7 +160,9 @@ void TunnelDecapOrch::doTask(Consumer& consumer) } if (exists) { - setTunnelAttribute(fvField(i), encap_ecn_mode, tunnel_id); + SWSS_LOG_NOTICE("Skip setting encap_ecn_mode since the SAI attribute SAI_TUNNEL_ATTR_ENCAP_ECN_MODE is create only"); + valid = false; + break; } } else if (fvField(i) == "ttl_mode") @@ -582,30 +586,6 @@ bool TunnelDecapOrch::setTunnelAttribute(string field, string value, sai_object_ sai_attribute_t attr; - if (field == "ecn_mode") - { - // decap ecn mode (copy from outer/standard) - attr.id = SAI_TUNNEL_ATTR_DECAP_ECN_MODE; - if (value == "copy_from_outer") - { - attr.value.s32 = SAI_TUNNEL_DECAP_ECN_MODE_COPY_FROM_OUTER; - } - else if (value == "standard") - { - attr.value.s32 = SAI_TUNNEL_DECAP_ECN_MODE_STANDARD; - } - } - - if (field == "encap_ecn_mode") - { - // encap ecn mode (only standard is supported) - attr.id = SAI_TUNNEL_ATTR_ENCAP_ECN_MODE; - if (value == "standard") - { - attr.value.s32 = SAI_TUNNEL_ENCAP_ECN_MODE_STANDARD; - } - } - if (field == "ttl_mode") { // ttl mode (uniform/pipe) diff --git a/orchagent/vnetorch.cpp b/orchagent/vnetorch.cpp index 645abdd204..e967c24697 100644 --- a/orchagent/vnetorch.cpp +++ b/orchagent/vnetorch.cpp @@ -429,6 +429,7 @@ bool VNetOrch::addOperation(const Request& request) uint32_t vni=0; string tunnel; string scope; + swss::MacAddress overlay_dmac; for (const auto& name: request.getAttrFieldNames()) { @@ -460,6 +461,10 @@ bool VNetOrch::addOperation(const Request& request) { advertise_prefix = request.getAttrBool("advertise_prefix"); } + else if (name == "overlay_dmac") + { + overlay_dmac = request.getAttrMacAddress("overlay_dmac"); + } else { SWSS_LOG_INFO("Unknown attribute: %s", name.c_str()); @@ -486,7 +491,7 @@ bool VNetOrch::addOperation(const Request& request) if (it == std::end(vnet_table_)) { - VNetInfo vnet_info = { tunnel, vni, peer_list, scope, advertise_prefix }; + VNetInfo vnet_info = { tunnel, vni, peer_list, scope, advertise_prefix, overlay_dmac }; obj = createObject(vnet_name, vnet_info, attrs); create = true; @@ -715,8 +720,11 @@ VNetRouteOrch::VNetRouteOrch(DBConnector *db, vector<string> &tableNames, VNetOrch *vnetOrch) handler_map_.insert(handler_pair(APP_VNET_RT_TUNNEL_TABLE_NAME, &VNetRouteOrch::handleTunnel)); state_db_ = shared_ptr<DBConnector>(new DBConnector("STATE_DB", 0)); + app_db_ = shared_ptr<DBConnector>(new DBConnector("APPL_DB", 0)); + state_vnet_rt_tunnel_table_ = unique_ptr<Table>(new Table(state_db_.get(), STATE_VNET_RT_TUNNEL_TABLE_NAME)); state_vnet_rt_adv_table_ = unique_ptr<Table>
(new Table(state_db_.get(), STATE_ADVERTISE_NETWORK_TABLE_NAME)); + monitor_session_producer_ = unique_ptr<Table>
(new Table(app_db_.get(), APP_VNET_MONITOR_TABLE_NAME)); gBfdOrch->attach(this); } @@ -895,6 +903,7 @@ bool VNetRouteOrch::removeNextHopGroup(const string& vnet, const NextHopGroupKey template<> bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& op, string& profile, + const string& monitoring, const map& monitors) { SWSS_LOG_ENTER(); @@ -935,7 +944,7 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP sai_object_id_t nh_id; if (!hasNextHopGroup(vnet, nexthops)) { - setEndpointMonitor(vnet, monitors, nexthops); + setEndpointMonitor(vnet, monitors, nexthops, monitoring, ipPrefix); if (nexthops.getSize() == 1) { NextHopKey nexthop(nexthops.to_string(), true); @@ -952,7 +961,7 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP { if (!addNextHopGroup(vnet, nexthops, vrf_obj)) { - delEndpointMonitor(vnet, nexthops); + delEndpointMonitor(vnet, nexthops, ipPrefix); SWSS_LOG_ERROR("Failed to create next hop group %s", nexthops.to_string().c_str()); return false; } @@ -1026,7 +1035,7 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP NextHopKey nexthop(nhg.to_string(), true); vrf_obj->removeTunnelNextHop(nexthop); } - delEndpointMonitor(vnet, nhg); + delEndpointMonitor(vnet, nhg, ipPrefix); } else { @@ -1086,7 +1095,7 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP NextHopKey nexthop(nhg.to_string(), true); vrf_obj->removeTunnelNextHop(nexthop); } - delEndpointMonitor(vnet, nhg); + delEndpointMonitor(vnet, nhg, ipPrefix); } else { @@ -1604,7 +1613,55 @@ void VNetRouteOrch::removeBfdSession(const string& vnet, const NextHopKey& endpo bfd_sessions_.erase(monitor_addr); } -void VNetRouteOrch::setEndpointMonitor(const string& vnet, const map& monitors, NextHopGroupKey& nexthops) +void VNetRouteOrch::createMonitoringSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& monitor_addr, IpPrefix& ipPrefix) +{ + SWSS_LOG_ENTER(); + + IpAddress endpoint_addr = endpoint.ip_address; + if (monitor_info_[vnet].find(ipPrefix) != monitor_info_[vnet].end() && + monitor_info_[vnet][ipPrefix].find(endpoint) != monitor_info_[vnet][ipPrefix].end()) + { + SWSS_LOG_NOTICE("Monitoring session for prefix %s endpoint %s already exist", ipPrefix.to_string().c_str(), endpoint_addr.to_string().c_str()); + return; + } + else + { + vector data; + auto *vnet_obj = vnet_orch_->getTypePtr(vnet); + + auto overlay_dmac = vnet_obj->getOverlayDMac(); + string key = ipPrefix.to_string() + ":" + monitor_addr.to_string(); + FieldValueTuple fvTuple1("packet_type", "vxlan"); + data.push_back(fvTuple1); + + FieldValueTuple fvTuple3("overlay_dmac", overlay_dmac.to_string()); + data.push_back(fvTuple3); + + monitor_session_producer_->set(key, data); + + MonitorSessionInfo& info = monitor_info_[vnet][ipPrefix][endpoint]; + info.monitor = monitor_addr; + info.state = MONITOR_SESSION_STATE::MONITOR_SESSION_STATE_DOWN; + } +} + +void VNetRouteOrch::removeMonitoringSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& monitor_addr, IpPrefix& ipPrefix) +{ + SWSS_LOG_ENTER(); + + IpAddress endpoint_addr = endpoint.ip_address; + if (monitor_info_[vnet].find(ipPrefix) == monitor_info_[vnet].end() || + monitor_info_[vnet][ipPrefix].find(endpoint) == monitor_info_[vnet][ipPrefix].end()) + { + SWSS_LOG_NOTICE("Monitor session for prefix %s endpoint %s does not exist", ipPrefix.to_string().c_str(), endpoint_addr.to_string().c_str()); + } + + string key = ipPrefix.to_string() + ":" + 
monitor_addr.to_string(); + monitor_session_producer_->del(key); + monitor_info_[vnet][ipPrefix].erase(endpoint); +} + +void VNetRouteOrch::setEndpointMonitor(const string& vnet, const map& monitors, NextHopGroupKey& nexthops, const string& monitoring, IpPrefix& ipPrefix) { SWSS_LOG_ENTER(); @@ -1612,31 +1669,62 @@ void VNetRouteOrch::setEndpointMonitor(const string& vnet, const map nhks = nexthops.getNextHops(); + bool is_custom_monitoring = false; + if (monitor_info_[vnet].find(ipPrefix) != monitor_info_[vnet].end()) + { + is_custom_monitoring = true; + } for (auto nhk: nhks) { IpAddress ip = nhk.ip_address; - if (nexthop_info_[vnet].find(ip) != nexthop_info_[vnet].end()) { - if (--nexthop_info_[vnet][ip].ref_count == 0) + if (is_custom_monitoring) + { + if ( monitor_info_[vnet][ipPrefix].find(nhk) != monitor_info_[vnet][ipPrefix].end()) { - IpAddress monitor_addr = nexthop_info_[vnet][ip].monitor_addr; - removeBfdSession(vnet, nhk, monitor_addr); + removeMonitoringSession(vnet, nhk, monitor_info_[vnet][ipPrefix][nhk].monitor, ipPrefix); + } + } + else + { + if (nexthop_info_[vnet].find(ip) != nexthop_info_[vnet].end()) { + if (--nexthop_info_[vnet][ip].ref_count == 0) + { + IpAddress monitor_addr = nexthop_info_[vnet][ip].monitor_addr; + { + removeBfdSession(vnet, nhk, monitor_addr); + } + } } } } + if (is_custom_monitoring) + { + monitor_info_[vnet].erase(ipPrefix); + } } void VNetRouteOrch::postRouteState(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& profile) @@ -1917,7 +2005,9 @@ bool VNetRouteOrch::handleTunnel(const Request& request) vector vni_list; vector monitor_list; string profile = ""; - + vector primary_list; + string monitoring; + swss::IpPrefix adv_prefix; for (const auto& name: request.getAttrFieldNames()) { if (name == "endpoint") @@ -1942,6 +2032,18 @@ bool VNetRouteOrch::handleTunnel(const Request& request) { profile = request.getAttrString(name); } + else if (name == "primary") + { + primary_list = request.getAttrIPList(name); + } + else if (name == "monitoring") + { + monitoring = request.getAttrString(name); + } + else if (name == "adv_prefix") + { + adv_prefix = request.getAttrIpPrefix(name); + } else { SWSS_LOG_INFO("Unknown attribute: %s", name.c_str()); @@ -2005,7 +2107,7 @@ bool VNetRouteOrch::handleTunnel(const Request& request) if (vnet_orch_->isVnetExecVrf()) { - return doRouteTask(vnet_name, ip_pfx, nhg, op, profile, monitors); + return doRouteTask(vnet_name, ip_pfx, nhg, op, profile, monitoring, monitors); } return true; diff --git a/orchagent/vnetorch.h b/orchagent/vnetorch.h index 4f63764a0e..3d03e22420 100644 --- a/orchagent/vnetorch.h +++ b/orchagent/vnetorch.h @@ -24,6 +24,12 @@ extern sai_object_id_t gVirtualRouterId; +enum class MONITOR_SESSION_STATE +{ + MONITOR_SESSION_STATE_UP, + MONITOR_SESSION_STATE_DOWN, + MONITOR_SESSION_STATE_UNKNOWN, +}; const request_description_t vnet_request_description = { { REQ_T_STRING }, { @@ -34,6 +40,8 @@ const request_description_t vnet_request_description = { { "guid", REQ_T_STRING }, { "scope", REQ_T_STRING }, { "advertise_prefix", REQ_T_BOOL}, + { "overlay_dmac", REQ_T_MAC_ADDRESS}, + }, { "vxlan_tunnel", "vni" } // mandatory attributes }; @@ -59,6 +67,7 @@ struct VNetInfo set peers; string scope; bool advertise_prefix; + swss::MacAddress overlay_dmac; }; typedef map vrid_list_t; @@ -86,7 +95,8 @@ class VNetObject peer_list_(vnetInfo.peers), vni_(vnetInfo.vni), scope_(vnetInfo.scope), - advertise_prefix_(vnetInfo.advertise_prefix) + advertise_prefix_(vnetInfo.advertise_prefix), + 
overlay_dmac_(vnetInfo.overlay_dmac) { } virtual bool updateObj(vector&) = 0; @@ -121,6 +131,11 @@ class VNetObject return advertise_prefix_; } + swss::MacAddress getOverlayDMac() const + { + return overlay_dmac_; + } + virtual ~VNetObject() noexcept(false) {}; private: @@ -129,6 +144,7 @@ class VNetObject uint32_t vni_; string scope_; bool advertise_prefix_; + swss::MacAddress overlay_dmac_; }; struct nextHop @@ -282,6 +298,9 @@ const request_description_t vnet_route_description = { { "mac_address", REQ_T_STRING }, { "endpoint_monitor", REQ_T_IP_LIST }, { "profile", REQ_T_STRING }, + { "primary", REQ_T_IP_LIST }, + { "monitoring", REQ_T_STRING }, + { "adv_prefix", REQ_T_IP_PREFIX }, }, { } }; @@ -326,9 +345,16 @@ struct BfdSessionInfo NextHopKey endpoint; }; +struct MonitorSessionInfo +{ + MONITOR_SESSION_STATE state; + IpAddress monitor; +}; + typedef std::map VNetNextHopGroupInfoTable; typedef std::map VNetTunnelRouteTable; typedef std::map BfdSessionTable; +typedef std::map> MonitorSessionTable; typedef std::map VNetEndpointInfoTable; class VNetRouteOrch : public Orch2, public Subject, public Observer @@ -361,8 +387,11 @@ class VNetRouteOrch : public Orch2, public Subject, public Observer void createBfdSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& ipAddr); void removeBfdSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& ipAddr); - void setEndpointMonitor(const string& vnet, const map& monitors, NextHopGroupKey& nexthops); - void delEndpointMonitor(const string& vnet, NextHopGroupKey& nexthops); + void createMonitoringSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& ipAddr, IpPrefix& ipPrefix); + void removeMonitoringSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& ipAddr, IpPrefix& ipPrefix); + void setEndpointMonitor(const string& vnet, const map& monitors, NextHopGroupKey& nexthops, + const string& monitoring, IpPrefix& ipPrefix); + void delEndpointMonitor(const string& vnet, NextHopGroupKey& nexthops, IpPrefix& ipPrefix); void postRouteState(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& profile); void removeRouteState(const string& vnet, IpPrefix& ipPrefix); void addRouteAdvertisement(IpPrefix& ipPrefix, string& profile); @@ -373,6 +402,7 @@ class VNetRouteOrch : public Orch2, public Subject, public Observer template bool doRouteTask(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& op, string& profile, + const string& monitoring, const std::map& monitors=std::map()); template @@ -387,9 +417,12 @@ class VNetRouteOrch : public Orch2, public Subject, public Observer std::map syncd_nexthop_groups_; std::map syncd_tunnel_routes_; BfdSessionTable bfd_sessions_; + std::map monitor_info_; std::map nexthop_info_; ProducerStateTable bfd_session_producer_; + unique_ptr
monitor_session_producer_;
        shared_ptr<DBConnector> state_db_;
+       shared_ptr<DBConnector> app_db_;
        unique_ptr<Table> state_vnet_rt_tunnel_table_;
        unique_ptr<Table>
state_vnet_rt_adv_table_; }; diff --git a/orchagent/vxlanorch.cpp b/orchagent/vxlanorch.cpp index 87c70945c4..34ff308125 100644 --- a/orchagent/vxlanorch.cpp +++ b/orchagent/vxlanorch.cpp @@ -2349,6 +2349,10 @@ bool EvpnRemoteVnip2pOrch::addOperation(const Request& request) VxlanTunnelOrch* tunnel_orch = gDirectory.get(); Port tunnelPort, vlanPort; + VxlanTunnelMapOrch* vxlan_tun_map_orch = gDirectory.get(); + std::string vniVlanMapName; + uint32_t tmp_vlan_id = 0; + sai_object_id_t tnl_map_entry_id = SAI_NULL_OBJECT_ID; if (!gPortsOrch->getVlanByVlanId(vlan_id, vlanPort)) { @@ -2356,6 +2360,13 @@ bool EvpnRemoteVnip2pOrch::addOperation(const Request& request) return false; } + /* Remote end point can be added only after local VLAN to VNI map gets created */ + if (!vxlan_tun_map_orch->isVniVlanMapExists(vni_id, vniVlanMapName, &tnl_map_entry_id, &tmp_vlan_id)) + { + SWSS_LOG_WARN("Vxlan tunnel map is not created for vni:%d", vni_id); + return false; + } + if (tunnel_orch->getTunnelPort(remote_vtep,tunnelPort)) { SWSS_LOG_INFO("Vxlan tunnelPort exists: %s", remote_vtep.c_str()); @@ -2492,6 +2503,11 @@ bool EvpnRemoteVnip2mpOrch::addOperation(const Request& request) } VxlanTunnelOrch* tunnel_orch = gDirectory.get(); + VxlanTunnelMapOrch* vxlan_tun_map_orch = gDirectory.get(); + std::string vniVlanMapName; + uint32_t tmp_vlan_id = 0; + sai_object_id_t tnl_map_entry_id = SAI_NULL_OBJECT_ID; + Port tunnelPort, vlanPort; auto vtep_ptr = evpn_orch->getEVPNVtep(); if (!vtep_ptr) @@ -2507,6 +2523,13 @@ bool EvpnRemoteVnip2mpOrch::addOperation(const Request& request) return false; } + /* Remote end point can be added only after local VLAN to VNI map gets created */ + if (!vxlan_tun_map_orch->isVniVlanMapExists(vni_id, vniVlanMapName, &tnl_map_entry_id, &tmp_vlan_id)) + { + SWSS_LOG_WARN("Vxlan tunnel map is not created for vni: %d", vni_id); + return false; + } + auto src_vtep = vtep_ptr->getSrcIP().to_string(); if (tunnel_orch->getTunnelPort(src_vtep,tunnelPort, true)) { diff --git a/portsyncd/portsyncd.cpp b/portsyncd/portsyncd.cpp index 4048c1bd18..9ac1a91983 100644 --- a/portsyncd/portsyncd.cpp +++ b/portsyncd/portsyncd.cpp @@ -47,40 +47,40 @@ void handlePortConfigFromConfigDB(ProducerStateTable &p, DBConnector &cfgDb, boo int main(int argc, char **argv) { - Logger::linkToDbNative("portsyncd"); - int opt; - - while ((opt = getopt(argc, argv, "v:h")) != -1 ) + try { - switch (opt) + Logger::linkToDbNative("portsyncd"); + int opt; + + while ((opt = getopt(argc, argv, "v:h")) != -1 ) { - case 'h': - usage(); - return 1; - default: /* '?' */ - usage(); - return EXIT_FAILURE; + switch (opt) + { + case 'h': + usage(); + return 1; + default: /* '?' */ + usage(); + return EXIT_FAILURE; + } } - } - DBConnector cfgDb("CONFIG_DB", 0); - DBConnector appl_db("APPL_DB", 0); - DBConnector state_db("STATE_DB", 0); - ProducerStateTable p(&appl_db, APP_PORT_TABLE_NAME); + DBConnector cfgDb("CONFIG_DB", 0); + DBConnector appl_db("APPL_DB", 0); + DBConnector state_db("STATE_DB", 0); + ProducerStateTable p(&appl_db, APP_PORT_TABLE_NAME); - Table cfgDeviceMetaDataTable(&cfgDb, CFG_DEVICE_METADATA_TABLE_NAME); - if (!cfgDeviceMetaDataTable.hget("localhost", "switch_type", g_switchType)) - { - //Switch type is not configured. Consider it default = "switch" (regular switch) - g_switchType = "switch"; - } + Table cfgDeviceMetaDataTable(&cfgDb, CFG_DEVICE_METADATA_TABLE_NAME); + if (!cfgDeviceMetaDataTable.hget("localhost", "switch_type", g_switchType)) + { + //Switch type is not configured. 
Consider it default = "switch" (regular switch) + g_switchType = "switch"; + } - WarmStart::initialize("portsyncd", "swss"); - WarmStart::checkWarmStart("portsyncd", "swss"); - const bool warm = WarmStart::isWarmStart(); + WarmStart::initialize("portsyncd", "swss"); + WarmStart::checkWarmStart("portsyncd", "swss"); + const bool warm = WarmStart::isWarmStart(); - try - { NetLink netlink; Select s; @@ -144,6 +144,16 @@ int main(int argc, char **argv) } } } + catch (const swss::RedisError& e) + { + cerr << "Exception \"" << e.what() << "\" was thrown in daemon" << endl; + return EXIT_FAILURE; + } + catch (const std::out_of_range& e) + { + cerr << "Exception \"" << e.what() << "\" was thrown in daemon" << endl; + return EXIT_FAILURE; + } catch (const std::exception& e) { cerr << "Exception \"" << e.what() << "\" was thrown in daemon" << endl; diff --git a/tests/conftest.py b/tests/conftest.py index 1ffe5c18ce..092aa90e27 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -26,6 +26,7 @@ from dvslib import dvs_lag from dvslib import dvs_mirror from dvslib import dvs_policer +from dvslib import dvs_hash from buffer_model import enable_dynamic_buffer @@ -157,6 +158,8 @@ def _populate_default_asic_db_values(self) -> None: self.default_acl_tables = self.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_ACL_TABLE") self.default_acl_entries = self.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_ACL_ENTRY") + self.default_hash_keys = self.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_HASH") + self.default_copp_policers = self.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_POLICER") @@ -607,7 +610,7 @@ def restart(self) -> None: self.check_ready_status_and_init_db() def runcmd(self, cmd: str, include_stderr=True) -> Tuple[int, str]: - res = self.ctn.exec_run(cmd) + res = self.ctn.exec_run(cmd, stdout=True, stderr=include_stderr) exitcode = res.exit_code out = res.output.decode("utf-8") @@ -1344,6 +1347,7 @@ def get_asic_db(self) -> AsicDbValidator: db = DVSDatabase(self.ASIC_DB_ID, self.redis_sock) db.default_acl_tables = self.asicdb.default_acl_tables db.default_acl_entries = self.asicdb.default_acl_entries + db.default_hash_keys = self.asicdb.default_hash_keys db.default_copp_policers = self.asicdb.default_copp_policers db.port_name_map = self.asicdb.portnamemap db.default_vlan_id = self.asicdb.default_vlan_id @@ -1924,6 +1928,11 @@ def dvs_policer_manager(request, dvs): request.cls.dvs_policer = dvs_policer.DVSPolicer(dvs.get_asic_db(), dvs.get_config_db()) +@pytest.fixture(scope="class") +def dvs_hash_manager(request, dvs): + request.cls.dvs_hash = dvs_hash.DVSHash(dvs.get_asic_db(), + dvs.get_config_db()) + ##################### DPB fixtures ########################################### def create_dpb_config_file(dvs): cmd = "sonic-cfggen -j /etc/sonic/init_cfg.json -j /tmp/ports.json --print-data > /tmp/dpb_config_db.json" diff --git a/tests/dvslib/dvs_hash.py b/tests/dvslib/dvs_hash.py new file mode 100644 index 0000000000..5ac896962c --- /dev/null +++ b/tests/dvslib/dvs_hash.py @@ -0,0 +1,80 @@ +"""Utilities for interacting with HASH objects when writing VS tests.""" +from typing import Dict, List + + +class DVSHash: + """Manage hash objects on the virtual switch.""" + + CDB_SWITCH_HASH = "SWITCH_HASH" + KEY_SWITCH_HASH_GLOBAL = "GLOBAL" + + ADB_HASH = "ASIC_STATE:SAI_OBJECT_TYPE_HASH" + + def __init__(self, asic_db, config_db): + """Create a new DVS hash manager.""" + self.asic_db = asic_db + self.config_db = config_db + + def update_switch_hash( + self, + qualifiers: Dict[str, str] + ) -> None: + """Update switch hash global in 
Config DB.""" + self.config_db.update_entry(self.CDB_SWITCH_HASH, self.KEY_SWITCH_HASH_GLOBAL, qualifiers) + + def get_hash_ids( + self, + expected: int = None + ) -> List[str]: + """Get all of the hash ids in ASIC DB. + + Args: + expected: The number of hash ids that are expected to be present in ASIC DB. + + Returns: + The list of hash ids in ASIC DB. + """ + if expected is None: + return self.asic_db.get_keys(self.ADB_HASH) + + num_keys = len(self.asic_db.default_hash_keys) + expected + keys = self.asic_db.wait_for_n_keys(self.ADB_HASH, num_keys) + + for k in self.asic_db.default_hash_keys: + assert k in keys + + return [k for k in keys if k not in self.asic_db.default_hash_keys] + + def verify_hash_count( + self, + expected: int + ) -> None: + """Verify that there are N hash objects in ASIC DB. + + Args: + expected: The number of hash ids that are expected to be present in ASIC DB. + """ + self.get_hash_ids(expected) + + def verify_hash_generic( + self, + sai_hash_id: str, + sai_qualifiers: Dict[str, str] + ) -> None: + """Verify that hash object has correct ASIC DB representation. + + Args: + sai_hash_id: The specific hash id to check in ASIC DB. + sai_qualifiers: The expected set of SAI qualifiers to be found in ASIC DB. + """ + entry = self.asic_db.wait_for_entry(self.ADB_HASH, sai_hash_id) + + for k, v in entry.items(): + if k == "NULL": + continue + elif k in sai_qualifiers: + if k == "SAI_HASH_ATTR_NATIVE_HASH_FIELD_LIST": + hfList = v[v.index(":")+1:].split(",") + assert set(sai_qualifiers[k]) == set(hfList) + else: + assert False, "Unknown SAI qualifier: key={}, value={}".format(k, v) diff --git a/tests/dvslib/dvs_pbh.py b/tests/dvslib/dvs_pbh.py index df612638ea..2caf059adc 100644 --- a/tests/dvslib/dvs_pbh.py +++ b/tests/dvslib/dvs_pbh.py @@ -11,6 +11,7 @@ class DVSPbh: CDB_PBH_HASH_FIELD = "PBH_HASH_FIELD" ADB_PBH_HASH = "ASIC_STATE:SAI_OBJECT_TYPE_HASH" + ADB_PBH_HASH_FIELD = "ASIC_STATE:SAI_OBJECT_TYPE_FINE_GRAINED_HASH_FIELD" def __init__(self, asic_db, config_db): """Create a new DVS PBH Manager.""" @@ -110,13 +111,6 @@ def remove_pbh_hash( """Remove PBH hash from Config DB.""" self.config_db.delete_entry(self.CDB_PBH_HASH, hash_name) - def verify_pbh_hash_count( - self, - expected: int - ) -> None: - """Verify that there are N hash objects in ASIC DB.""" - self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_HASH", expected) - def create_pbh_hash_field( self, hash_field_name: str, @@ -147,11 +141,4 @@ def verify_pbh_hash_field_count( expected: int ) -> None: """Verify that there are N hash field objects in ASIC DB.""" - self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_FINE_GRAINED_HASH_FIELD", expected) - - def get_pbh_hash_ids( - self, - expected: int - ) -> List[str]: - """Get all of the PBH hash IDs in ASIC DB.""" - return self.asic_db.wait_for_n_keys(self.ADB_PBH_HASH, expected) + self.asic_db.wait_for_n_keys(self.ADB_PBH_HASH_FIELD, expected) diff --git a/tests/evpn_tunnel.py b/tests/evpn_tunnel.py index 975629ed3c..774a3b3960 100644 --- a/tests/evpn_tunnel.py +++ b/tests/evpn_tunnel.py @@ -697,6 +697,18 @@ def check_vxlan_dip_tunnel(self, dvs, vtep_name, src_ip, dip): self.bridgeport_map[dip] = ret[0] + def check_vxlan_dip_tunnel_not_created(self, dvs, vtep_name, src_ip, dip): + state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + + expected_state_attributes = { + 'src_ip': src_ip, + 'dst_ip': dip, + 'tnl_src': 'EVPN', + } + + ret = self.helper.get_key_with_attr(state_db, 'VXLAN_TUNNEL_TABLE', expected_state_attributes) + assert 
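The new manager is wired in through the dvs_hash_manager fixture added to conftest.py above, which exposes it as self.dvs_hash on the test class. A sketch of the intended usage pattern in a class-based VS test (the class name and the SWITCH_HASH field below are illustrative assumptions, not part of this change):

    import pytest

    @pytest.mark.usefixtures("dvs_hash_manager")
    class TestSwitchHashSketch:
        def test_SwitchHashGlobalUpdate(self, testlog):
            # Write qualifiers to SWITCH_HASH|GLOBAL in CONFIG_DB; the field
            # name "ecmp_hash" is an assumed example, not taken from this patch.
            self.dvs_hash.update_switch_hash({"ecmp_hash": "DST_IP,SRC_IP"})

            # Only the switch's default HASH objects are expected to exist,
            # so the count of non-default hash ids should stay at zero.
            self.dvs_hash.verify_hash_count(0)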
len(ret) == 0, "Tunnel State table entry created"
+
     def check_vlan_extension_delete(self, dvs, vlan_name, dip):
         asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)

@@ -777,6 +789,27 @@ def check_vlan_extension_p2mp(self, dvs, vlan_name, sip, dip):
         assert len(ret) == 1, "More than 1 L2MC group member created"
         self.l2mcgroup_member_map[dip+vlan_name] = ret[0]

+    def check_vlan_extension_not_created_p2mp(self, dvs, vlan_name, sip, dip):
+        asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)
+        tbl = swsscommon.Table(asic_db, 'ASIC_STATE:SAI_OBJECT_TYPE_VLAN')
+        expected_vlan_attributes = {
+            'SAI_VLAN_ATTR_VLAN_ID': vlan_name,
+        }
+        ret = self.helper.get_key_with_attr(asic_db, 'ASIC_STATE:SAI_OBJECT_TYPE_VLAN', expected_vlan_attributes)
+        assert len(ret) > 0, "VLAN entry not created"
+        assert len(ret) == 1, "More than 1 VLAN entry created"
+
+        self.vlan_id_map[vlan_name] = ret[0]
+        status, fvs = tbl.get(self.vlan_id_map[vlan_name])
+
+        print(fvs)
+
+        for attr, value in fvs:
+            assert attr != 'SAI_VLAN_ATTR_UNKNOWN_UNICAST_FLOOD_CONTROL_TYPE', "Unknown unicast flood control type is set"
+            assert attr != 'SAI_VLAN_ATTR_BROADCAST_FLOOD_CONTROL_TYPE', "Broadcast flood control type is set"
+            assert attr != 'SAI_VLAN_ATTR_UNKNOWN_UNICAST_FLOOD_GROUP', "Unknown unicast flood group is set"
+            assert attr != 'SAI_VLAN_ATTR_BROADCAST_FLOOD_GROUP', "Broadcast flood group is set"
+
     def check_vxlan_tunnel_entry(self, dvs, tunnel_name, vnet_name, vni_id):
         asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)

diff --git a/tests/mock_tests/Makefile.am b/tests/mock_tests/Makefile.am
index 2d9e642fee..f07caf6165 100644
--- a/tests/mock_tests/Makefile.am
+++ b/tests/mock_tests/Makefile.am
@@ -4,9 +4,9 @@ P4_ORCH_DIR = $(top_srcdir)/orchagent/p4orch

 CFLAGS_SAI = -I /usr/include/sai

-TESTS = tests tests_intfmgrd tests_portsyncd tests_fpmsyncd
+TESTS = tests tests_intfmgrd tests_portsyncd tests_fpmsyncd tests_response_publisher

-noinst_PROGRAMS = tests tests_intfmgrd tests_portsyncd tests_fpmsyncd
+noinst_PROGRAMS = tests tests_intfmgrd tests_portsyncd tests_fpmsyncd tests_response_publisher

 LDADD_SAI = -lsaimeta -lsaimetadata -lsaivs -lsairedis

@@ -21,7 +21,7 @@ LDADD_GTEST = -L/usr/src/gtest

 ## Orchagent Unit Tests

-tests_INCLUDES = -I $(FLEX_CTR_DIR) -I $(DEBUG_CTR_DIR) -I $(top_srcdir)/lib -I$(top_srcdir)/cfgmgr -I$(top_srcdir)/orchagent -I$(P4_ORCH_DIR)/tests
+tests_INCLUDES = -I $(FLEX_CTR_DIR) -I $(DEBUG_CTR_DIR) -I $(top_srcdir)/lib -I$(top_srcdir)/cfgmgr -I$(top_srcdir)/orchagent -I$(P4_ORCH_DIR)/tests -I$(top_srcdir)/warmrestart

 tests_SOURCES = aclorch_ut.cpp \
                 portsorch_ut.cpp \
@@ -30,6 +30,7 @@ tests_SOURCES = aclorch_ut.cpp \
                 bufferorch_ut.cpp \
                 buffermgrdyn_ut.cpp \
                 fdborch/flush_syncd_notif_ut.cpp \
+                copp_ut.cpp \
                 copporch_ut.cpp \
                 saispy_ut.cpp \
                 consumer_ut.cpp \
@@ -48,6 +49,8 @@ tests_SOURCES = aclorch_ut.cpp \
                 swssnet_ut.cpp \
                 flowcounterrouteorch_ut.cpp \
                 orchdaemon_ut.cpp \
+                warmrestartassist_ut.cpp \
+                test_failure_handling.cpp \
                 $(top_srcdir)/lib/gearboxutils.cpp \
                 $(top_srcdir)/lib/subintf.cpp \
                 $(top_srcdir)/orchagent/orchdaemon.cpp \
@@ -108,7 +111,8 @@ tests_SOURCES = aclorch_ut.cpp \
                 $(top_srcdir)/orchagent/dash/dashorch.cpp \
                 $(top_srcdir)/orchagent/dash/dashrouteorch.cpp \
                 $(top_srcdir)/orchagent/dash/dashvnetorch.cpp \
-                $(top_srcdir)/cfgmgr/buffermgrdyn.cpp
+                $(top_srcdir)/cfgmgr/buffermgrdyn.cpp \
+
$(top_srcdir)/warmrestart/warmRestartAssist.cpp tests_SOURCES += $(FLEX_CTR_DIR)/flex_counter_manager.cpp $(FLEX_CTR_DIR)/flex_counter_stat_manager.cpp $(FLEX_CTR_DIR)/flow_counter_handler.cpp $(FLEX_CTR_DIR)/flowcounterrouteorch.cpp tests_SOURCES += $(DEBUG_CTR_DIR)/debug_counter.cpp $(DEBUG_CTR_DIR)/drop_counter.cpp @@ -183,3 +187,20 @@ tests_fpmsyncd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST tests_fpmsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) $(tests_fpmsyncd_INCLUDES) tests_fpmsyncd_LDADD = $(LDADD_GTEST) $(LDADD_SAI) -lnl-genl-3 -lhiredis -lhiredis \ -lswsscommon -lswsscommon -lgtest -lgtest_main -lzmq -lnl-3 -lnl-route-3 -lpthread -lgmock -lgmock_main + +## response publisher unit tests + +tests_response_publisher_SOURCES = response_publisher/response_publisher_ut.cpp \ + $(top_srcdir)/orchagent/response_publisher.cpp \ + mock_orchagent_main.cpp \ + mock_dbconnector.cpp \ + mock_table.cpp \ + mock_hiredis.cpp \ + mock_redisreply.cpp + +tests_response_publisher_INCLUDES = $(tests_INCLUDES) +tests_response_publisher_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) +tests_response_publisher_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) $(tests_response_publisher_INCLUDES) +tests_response_publisher_LDADD = $(LDADD_GTEST) $(LDADD_SAI) -lnl-genl-3 -lhiredis -lhiredis \ + -lswsscommon -lswsscommon -lgtest -lgtest_main -lzmq -lnl-3 -lnl-route-3 -lpthread + diff --git a/tests/mock_tests/aclorch_ut.cpp b/tests/mock_tests/aclorch_ut.cpp index d8fe2bbd2a..5c8866b240 100644 --- a/tests/mock_tests/aclorch_ut.cpp +++ b/tests/mock_tests/aclorch_ut.cpp @@ -1409,7 +1409,7 @@ namespace aclorch_test { { ACL_TABLE_TYPE_MATCHES, - string(MATCH_SRC_IP) + comma + MATCH_ETHER_TYPE + comma + MATCH_L4_SRC_PORT_RANGE + string(MATCH_SRC_IP) + comma + MATCH_ETHER_TYPE + comma + MATCH_L4_SRC_PORT_RANGE + comma + MATCH_BTH_OPCODE + comma + MATCH_AETH_SYNDROME }, { ACL_TABLE_TYPE_BPOINT_TYPES, @@ -1431,6 +1431,8 @@ namespace aclorch_test { "SAI_ACL_TABLE_ATTR_FIELD_SRC_IP", "true" }, { "SAI_ACL_TABLE_ATTR_FIELD_ETHER_TYPE", "true" }, { "SAI_ACL_TABLE_ATTR_FIELD_ACL_RANGE_TYPE", "1:SAI_ACL_RANGE_TYPE_L4_SRC_PORT_RANGE" }, + { "SAI_ACL_TABLE_ATTR_FIELD_BTH_OPCODE", "true" }, + { "SAI_ACL_TABLE_ATTR_FIELD_AETH_SYNDROME", "true" }, }; ASSERT_TRUE(validateAclTable( @@ -1477,6 +1479,42 @@ namespace aclorch_test // DST_IP is not in the table type ASSERT_FALSE(orch->getAclRule(aclTableName, aclRuleName)); + orch->doAclRuleTask( + deque( + { + { + aclTableName + "|" + aclRuleName, + SET_COMMAND, + { + { ACTION_PACKET_ACTION, PACKET_ACTION_DROP }, + { MATCH_BTH_OPCODE, "0x60" }, + } + } + } + ) + ); + + // MATCH_BTH_OPCODE invalid format + ASSERT_FALSE(orch->getAclRule(aclTableName, aclRuleName)); + + orch->doAclRuleTask( + deque( + { + { + aclTableName + "|" + aclRuleName, + SET_COMMAND, + { + { ACTION_PACKET_ACTION, PACKET_ACTION_DROP }, + { MATCH_AETH_SYNDROME, "0x60" }, + } + } + } + ) + ); + + // MATCH_AETH_SYNDROME invalid format + ASSERT_FALSE(orch->getAclRule(aclTableName, aclRuleName)); + orch->doAclRuleTask( deque( { @@ -1486,6 +1524,8 @@ namespace aclorch_test { { MATCH_SRC_IP, "1.1.1.1/32" }, { ACTION_PACKET_ACTION, PACKET_ACTION_DROP }, + { MATCH_BTH_OPCODE, "0x60/0xff" }, + { MATCH_AETH_SYNDROME, "0x60/0x60" }, } } } diff --git a/tests/mock_tests/copp_cfg.json b/tests/mock_tests/copp_cfg.json new file mode 100644 index 0000000000..46d921b827 --- /dev/null +++ 
b/tests/mock_tests/copp_cfg.json @@ -0,0 +1,111 @@ +{ + "COPP_GROUP": { + "default": { + "queue": "0", + "meter_type":"packets", + "mode":"sr_tcm", + "cir":"600", + "cbs":"600", + "red_action":"drop" + }, + "queue4_group1": { + "trap_action":"trap", + "trap_priority":"4", + "queue": "4" + }, + "queue4_group2": { + "trap_action":"copy", + "trap_priority":"4", + "queue": "4", + "meter_type":"packets", + "mode":"sr_tcm", + "cir":"600", + "cbs":"600", + "red_action":"drop" + }, + "queue4_group3": { + "trap_action":"trap", + "trap_priority":"4", + "queue": "4" + }, + "queue1_group1": { + "trap_action":"trap", + "trap_priority":"1", + "queue": "1", + "meter_type":"packets", + "mode":"sr_tcm", + "cir":"6000", + "cbs":"6000", + "red_action":"drop" + }, + "queue1_group2": { + "trap_action":"trap", + "trap_priority":"1", + "queue": "1", + "meter_type":"packets", + "mode":"sr_tcm", + "cir":"600", + "cbs":"600", + "red_action":"drop" + }, + "queue2_group1": { + "cbs": "1000", + "cir": "1000", + "genetlink_mcgrp_name": "packets", + "genetlink_name": "psample", + "meter_type": "packets", + "mode": "sr_tcm", + "queue": "2", + "red_action": "drop", + "trap_action": "trap", + "trap_priority": "1" + + } + }, + "COPP_TRAP": { + "bgp": { + "trap_ids": "bgp,bgpv6", + "trap_group": "queue4_group1" + }, + "lacp": { + "trap_ids": "lacp", + "trap_group": "queue4_group1", + "always_enabled": "true" + }, + "arp": { + "trap_ids": "arp_req,arp_resp,neigh_discovery", + "trap_group": "queue4_group2", + "always_enabled": "true" + }, + "lldp": { + "trap_ids": "lldp", + "trap_group": "queue4_group3" + }, + "dhcp_relay": { + "trap_ids": "dhcp,dhcpv6", + "trap_group": "queue4_group3" + }, + "udld": { + "trap_ids": "udld", + "trap_group": "queue4_group3", + "always_enabled": "true" + }, + "ip2me": { + "trap_ids": "ip2me", + "trap_group": "queue1_group1", + "always_enabled": "true" + }, + "macsec": { + "trap_ids": "eapol", + "trap_group": "queue4_group3" + }, + "nat": { + "trap_ids": "src_nat_miss,dest_nat_miss", + "trap_group": "queue1_group2" + }, + "sflow": { + "trap_group": "queue2_group1", + "trap_ids": "sample_packet" + } + } +} diff --git a/tests/mock_tests/copp_ut.cpp b/tests/mock_tests/copp_ut.cpp new file mode 100644 index 0000000000..1c3b766e1c --- /dev/null +++ b/tests/mock_tests/copp_ut.cpp @@ -0,0 +1,76 @@ +#include "gtest/gtest.h" +#include +#include "schema.h" +#include "warm_restart.h" +#include "ut_helper.h" +#include "coppmgr.h" +#include "coppmgr.cpp" +#include +#include +using namespace std; +using namespace swss; + +void create_init_file() +{ + int status = system("sudo mkdir /etc/sonic/"); + ASSERT_EQ(status, 0); + + status = system("sudo chmod 777 /etc/sonic/"); + ASSERT_EQ(status, 0); + + status = system("sudo cp copp_cfg.json /etc/sonic/"); + ASSERT_EQ(status, 0); +} + +void cleanup() +{ + int status = system("sudo rm -rf /etc/sonic/"); + ASSERT_EQ(status, 0); +} + +TEST(CoppMgrTest, CoppTest) +{ + create_init_file(); + + const vector cfg_copp_tables = { + CFG_COPP_TRAP_TABLE_NAME, + CFG_COPP_GROUP_TABLE_NAME, + CFG_FEATURE_TABLE_NAME, + }; + + WarmStart::initialize("coppmgrd", "swss"); + WarmStart::checkWarmStart("coppmgrd", "swss"); + + DBConnector cfgDb("CONFIG_DB", 0); + DBConnector appDb("APPL_DB", 0); + DBConnector stateDb("STATE_DB", 0); + + /* The test will set an entry with queue1_group1|cbs value which differs from the init value + * found in the copp_cfg.json file. 
Then coppmgr constructor will be called and it will detect
+     * that there is already an entry for queue1_group1|cbs with a different value, and it should be
+     * overwritten with the init value.
+     * hget will verify that this indeed happened.
+     */
+    Table coppTable = Table(&appDb, APP_COPP_TABLE_NAME);
+    coppTable.set("queue1_group1",
+                {
+                    {"cbs", "6100"},
+                    {"cir", "6000"},
+                    {"meter_type", "packets"},
+                    {"mode", "sr_tcm"},
+                    {"queue", "1"},
+                    {"red_action", "drop"},
+                    {"trap_action", "trap"},
+                    {"trap_priority", "1"},
+                    {"trap_ids", "ip2me"}
+                });
+
+    CoppMgr coppmgr(&cfgDb, &appDb, &stateDb, cfg_copp_tables);
+
+    string override_val;
+    coppTable.hget("queue1_group1", "cbs", override_val);
+    EXPECT_EQ(override_val, "6000");
+
+    cleanup();
+}
+
diff --git a/tests/mock_tests/fake_response_publisher.cpp b/tests/mock_tests/fake_response_publisher.cpp
index 94480913d5..4c2c2b0370 100644
--- a/tests/mock_tests/fake_response_publisher.cpp
+++ b/tests/mock_tests/fake_response_publisher.cpp
@@ -3,7 +3,7 @@
 #include "response_publisher.h"

-ResponsePublisher::ResponsePublisher() : m_db("APPL_STATE_DB", 0) {}
+ResponsePublisher::ResponsePublisher(bool buffered) : m_db(std::make_unique<swss::DBConnector>("APPL_STATE_DB", 0)), m_buffered(buffered) {}

 void ResponsePublisher::publish(
     const std::string& table, const std::string& key,
@@ -20,3 +20,7 @@ void ResponsePublisher::writeToDB(
     const std::string& table, const std::string& key,
     const std::vector<swss::FieldValueTuple>& values, const std::string& op, bool replace) {}
+
+void ResponsePublisher::flush() {}
+
+void ResponsePublisher::setBuffered(bool buffered) {}
diff --git a/tests/mock_tests/portsorch_ut.cpp b/tests/mock_tests/portsorch_ut.cpp
index 836e862d55..740744df51 100644
--- a/tests/mock_tests/portsorch_ut.cpp
+++ b/tests/mock_tests/portsorch_ut.cpp
@@ -9,6 +9,7 @@
 #include "notifier.h"
 #define private public
 #include "pfcactionhandler.h"
+#include <sys/mman.h>
 #undef private

 #include

@@ -21,6 +22,8 @@ namespace portsorch_test

     sai_port_api_t ut_sai_port_api;
     sai_port_api_t *pold_sai_port_api;
+    sai_switch_api_t ut_sai_switch_api;
+    sai_switch_api_t *pold_sai_switch_api;

     bool not_support_fetching_fec;
     vector<sai_int32_t> mock_port_fec_modes = {SAI_PORT_FEC_MODE_RS, SAI_PORT_FEC_MODE_FC};
@@ -66,9 +69,28 @@ namespace portsorch_test
             _sai_set_port_fec_count++;
             _sai_port_fec_mode = attr[0].value.s32;
         }
+        else if (attr[0].id == SAI_PORT_ATTR_AUTO_NEG_MODE)
+        {
+            /* Simulating failure case */
+            return SAI_STATUS_FAILURE;
+        }
         return pold_sai_port_api->set_port_attribute(port_id, attr);
     }

+    uint32_t *_sai_syncd_notifications_count;
+    int32_t *_sai_syncd_notification_event;
+    sai_status_t _ut_stub_sai_set_switch_attribute(
+        _In_ sai_object_id_t switch_id,
+        _In_ const sai_attribute_t *attr)
+    {
+        if (attr[0].id == SAI_REDIS_SWITCH_ATTR_NOTIFY_SYNCD)
+        {
+            *_sai_syncd_notifications_count += 1;
+            *_sai_syncd_notification_event = attr[0].value.s32;
+        }
+        return pold_sai_switch_api->set_switch_attribute(switch_id, attr);
+    }
+
     void _hook_sai_port_api()
     {
         ut_sai_port_api = *sai_port_api;
@@ -83,6 +105,19 @@ namespace portsorch_test
         sai_port_api = pold_sai_port_api;
     }

+    void _hook_sai_switch_api()
+    {
+        ut_sai_switch_api = *sai_switch_api;
+        pold_sai_switch_api = sai_switch_api;
+        ut_sai_switch_api.set_switch_attribute = _ut_stub_sai_set_switch_attribute;
+        sai_switch_api = &ut_sai_switch_api;
+    }
+
+    void _unhook_sai_switch_api()
+    {
+        sai_switch_api = pold_sai_switch_api;
+    }
+
     sai_queue_api_t ut_sai_queue_api;
     sai_queue_api_t *pold_sai_queue_api;
     int _sai_set_queue_attr_count = 0;
@@ -473,6 +508,52 @@ namespace portsorch_test
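The hunk below adds PortTestSAIFailureHandling: the hooked port API above fails SAI_PORT_ATTR_AUTO_NEG_MODE, so programming autoneg on a port drives orchagent into its SAI failure path, which notifies syncd to take a dump and then aborts (hence ASSERT_DEATH). The trigger the mock test pushes is equivalent to this VS-style producer write (a sketch only; in the mock test the entry is fed to the consumer directly, and on a virtual switch SAI would not actually fail):

    from swsscommon import swsscommon

    def push_autoneg(dvs):
        # Mirrors the {"autoneg": "on"} entry the unit test queues for
        # Ethernet0 on APP_PORT_TABLE.
        app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0)
        ptbl = swsscommon.ProducerStateTable(app_db, "PORT_TABLE")
        fvs = swsscommon.FieldValuePairs([("autoneg", "on")])
        ptbl.set("Ethernet0", fvs)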
_unhook_sai_port_api();
     }

+    TEST_F(PortsOrchTest, PortTestSAIFailureHandling)
+    {
+        _hook_sai_port_api();
+        _hook_sai_switch_api();
+        Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME);
+        std::deque<KeyOpFieldsValuesTuple> entries;
+
+        not_support_fetching_fec = false;
+        // Get SAI default ports to populate DB
+        auto ports = ut_helper::getInitialSaiPorts();
+
+        for (const auto &it : ports)
+        {
+            portTable.set(it.first, it.second);
+        }
+
+        // Set PortConfigDone
+        portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } });
+
+        // refill consumer
+        gPortsOrch->addExistingData(&portTable);
+
+        // Apply configuration :
+        //  create ports
+        static_cast<Orch *>(gPortsOrch)->doTask();
+
+        _sai_syncd_notifications_count = (uint32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE,
+                                                         MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+        _sai_syncd_notification_event = (int32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE,
+                                                       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+        *_sai_syncd_notifications_count = 0;
+
+        entries.push_back({"Ethernet0", "SET",
+                          {
+                              {"autoneg", "on"}
+                          }});
+        auto consumer = dynamic_cast<Consumer *>(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME));
+        consumer->addToSync(entries);
+        ASSERT_DEATH({static_cast<Orch *>(gPortsOrch)->doTask();}, "");
+
+        ASSERT_EQ(*_sai_syncd_notifications_count, 1);
+        ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP);
+        _unhook_sai_port_api();
+        _unhook_sai_switch_api();
+    }
+
     TEST_F(PortsOrchTest, PortReadinessColdBoot)
     {
         Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME);
diff --git a/tests/mock_tests/qosorch_ut.cpp b/tests/mock_tests/qosorch_ut.cpp
index c727aad70d..ab4aa9e972 100644
--- a/tests/mock_tests/qosorch_ut.cpp
+++ b/tests/mock_tests/qosorch_ut.cpp
@@ -25,6 +25,7 @@ namespace qosorch_test
     int sai_remove_scheduler_count;
     int sai_set_wred_attribute_count;
     sai_object_id_t switch_dscp_to_tc_map_id;
+    TunnelDecapOrch *tunnel_decap_orch;

     sai_remove_scheduler_fn old_remove_scheduler;
     sai_scheduler_api_t ut_sai_scheduler_api, *pold_sai_scheduler_api;
@@ -36,6 +37,7 @@ namespace qosorch_test
     sai_qos_map_api_t ut_sai_qos_map_api, *pold_sai_qos_map_api;
     sai_set_switch_attribute_fn old_set_switch_attribute_fn;
     sai_switch_api_t ut_sai_switch_api, *pold_sai_switch_api;
+    sai_tunnel_api_t ut_sai_tunnel_api, *pold_sai_tunnel_api;

     typedef struct
     {
@@ -212,6 +214,40 @@ namespace qosorch_test
         return rc;
     }

+    sai_status_t _ut_stub_sai_create_tunnel(
+        _Out_ sai_object_id_t *tunnel_id,
+        _In_ sai_object_id_t switch_id,
+        _In_ uint32_t attr_count,
+        _In_ const sai_attribute_t *attr_list)
+    {
+        *tunnel_id = (sai_object_id_t)(0x1);
+        return SAI_STATUS_SUCCESS;
+    }
+
+    sai_status_t _ut_stub_sai_create_tunnel_term_table_entry(
+        _Out_ sai_object_id_t *tunnel_term_table_entry_id,
+        _In_ sai_object_id_t switch_id,
+        _In_ uint32_t attr_count,
+        _In_ const sai_attribute_t *attr_list)
+    {
+        *tunnel_term_table_entry_id = (sai_object_id_t)(0x1);
+        return SAI_STATUS_SUCCESS;
+    }
+
+    void checkTunnelAttribute(sai_attr_id_t attr)
+    {
+        ASSERT_TRUE(attr != SAI_TUNNEL_ATTR_ENCAP_ECN_MODE);
+        ASSERT_TRUE(attr != SAI_TUNNEL_ATTR_DECAP_ECN_MODE);
+    }
+
+    sai_status_t _ut_stub_sai_set_tunnel_attribute(
+        _In_ sai_object_id_t tunnel_id,
+        _In_ const sai_attribute_t *attr)
+    {
+        checkTunnelAttribute(attr->id);
+        return SAI_STATUS_ATTR_NOT_SUPPORTED_0;
+    }
+
     struct QosOrchTest : public ::testing::Test
     {
         QosOrchTest()
@@ -292,6 +328,14 @@ namespace qosorch_test
         sai_switch_api = &ut_sai_switch_api;
         ut_sai_switch_api.set_switch_attribute = _ut_stub_sai_set_switch_attribute;

+        // Mock tunnel API
+
pold_sai_tunnel_api = sai_tunnel_api; + ut_sai_tunnel_api = *pold_sai_tunnel_api; + sai_tunnel_api = &ut_sai_tunnel_api; + ut_sai_tunnel_api.set_tunnel_attribute = _ut_stub_sai_set_tunnel_attribute; + ut_sai_tunnel_api.create_tunnel = _ut_stub_sai_create_tunnel; + ut_sai_tunnel_api.create_tunnel_term_table_entry = _ut_stub_sai_create_tunnel_term_table_entry; + // Init switch and create dependencies m_app_db = make_shared("APPL_DB", 0); m_config_db = make_shared("CONFIG_DB", 0); @@ -381,6 +425,9 @@ namespace qosorch_test ASSERT_EQ(gNeighOrch, nullptr); gNeighOrch = new NeighOrch(m_app_db.get(), APP_NEIGH_TABLE_NAME, gIntfsOrch, gFdbOrch, gPortsOrch, m_chassis_app_db.get()); + ASSERT_EQ(tunnel_decap_orch, nullptr); + tunnel_decap_orch = new TunnelDecapOrch(m_app_db.get(), APP_TUNNEL_DECAP_TABLE_NAME); + vector qos_tables = { CFG_TC_TO_QUEUE_MAP_TABLE_NAME, CFG_SCHEDULER_TABLE_NAME, @@ -394,7 +441,8 @@ namespace qosorch_test CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, CFG_DSCP_TO_FC_MAP_TABLE_NAME, - CFG_EXP_TO_FC_MAP_TABLE_NAME + CFG_EXP_TO_FC_MAP_TABLE_NAME, + CFG_TC_TO_DSCP_MAP_TABLE_NAME }; gQosOrch = new QosOrch(m_config_db.get(), qos_tables); @@ -557,10 +605,14 @@ namespace qosorch_test delete gQosOrch; gQosOrch = nullptr; + delete tunnel_decap_orch; + tunnel_decap_orch = nullptr; + sai_qos_map_api = pold_sai_qos_map_api; sai_scheduler_api = pold_sai_scheduler_api; sai_wred_api = pold_sai_wred_api; sai_switch_api = pold_sai_switch_api; + sai_tunnel_api = pold_sai_tunnel_api; ut_helper::uninitSaiApi(); } }; @@ -1458,4 +1510,80 @@ namespace qosorch_test // Drain DSCP_TO_TC_MAP and PORT_QOS_MAP table static_cast(gQosOrch)->doTask(); } + + /* + * Set tunnel QoS attribute test - OA should skip settings + */ + TEST_F(QosOrchTest, QosOrchTestSetTunnelQoSAttribute) + { + // Create a new dscp to tc map + Table tcToDscpMapTable = Table(m_config_db.get(), CFG_TC_TO_DSCP_MAP_TABLE_NAME); + tcToDscpMapTable.set("AZURE", + { + {"0", "0"}, + {"1", "1"} + }); + gQosOrch->addExistingData(&tcToDscpMapTable); + static_cast(gQosOrch)->doTask(); + + std::deque entries; + entries.push_back({"MuxTunnel0", "SET", + { + {"decap_dscp_to_tc_map", "AZURE"}, + {"decap_tc_to_pg_map", "AZURE"}, + {"dscp_mode", "pipe"}, + {"dst_ip", "10.1.0.32"}, + {"encap_tc_to_dscp_map", "AZURE"}, + {"encap_tc_to_queue_map", "AZURE"}, + {"src_ip", "10.1.0.33"}, + {"ttl_mode", "pipe"}, + {"tunnel_type", "IPINIP"} + }}); + entries.push_back({"MuxTunnel1", "SET", + { + {"decap_dscp_to_tc_map", "AZURE"}, + {"dscp_mode", "pipe"}, + {"dst_ip", "10.1.0.32"}, + {"encap_tc_to_dscp_map", "AZURE"}, + {"encap_tc_to_queue_map", "AZURE"}, + {"src_ip", "10.1.0.33"}, + {"ttl_mode", "pipe"}, + {"tunnel_type", "IPINIP"} + }}); + auto consumer = dynamic_cast(tunnel_decap_orch->getExecutor(APP_TUNNEL_DECAP_TABLE_NAME)); + consumer->addToSync(entries); + // Drain TUNNEL_DECAP_TABLE table + static_cast(tunnel_decap_orch)->doTask(); + entries.clear(); + + // Set an attribute that is not supported by vendor + entries.push_back({"MuxTunnel1", "SET", + { + {"decap_tc_to_pg_map", "AZURE"} + }}); + consumer->addToSync(entries); + // Drain TUNNEL_DECAP_TABLE table + static_cast(tunnel_decap_orch)->doTask(); + entries.clear(); + + // Set attributes for the 2nd time + entries.push_back({"MuxTunnel0", "SET", + { + {"encap_ecn_mode", "standard"} + }}); + consumer->addToSync(entries); + // Drain TUNNEL_DECAP_TABLE table + static_cast(tunnel_decap_orch)->doTask(); + entries.clear(); + + // Set attributes for the 2nd 
time + entries.push_back({"MuxTunnel1", "SET", + { + {"ecn_mode", "copy_from_outer"} + }}); + consumer->addToSync(entries); + // Drain TUNNEL_DECAP_TABLE table + static_cast(tunnel_decap_orch)->doTask(); + entries.clear(); + } } diff --git a/tests/mock_tests/response_publisher/response_publisher_ut.cpp b/tests/mock_tests/response_publisher/response_publisher_ut.cpp new file mode 100644 index 0000000000..3738ac6d87 --- /dev/null +++ b/tests/mock_tests/response_publisher/response_publisher_ut.cpp @@ -0,0 +1,37 @@ +#include "response_publisher.h" + +#include + +bool gResponsePublisherRecord{false}; +bool gResponsePublisherLogRotate{false}; +std::ofstream gResponsePublisherRecordOfs; +std::string gResponsePublisherRecordFile; + +using namespace swss; + +TEST(ResponsePublisher, TestPublish) +{ + DBConnector conn{"APPL_STATE_DB", 0}; + Table stateTable{&conn, "SOME_TABLE"}; + std::string value; + ResponsePublisher publisher{}; + + publisher.publish("SOME_TABLE", "SOME_KEY", {{"field", "value"}}, ReturnCode(SAI_STATUS_SUCCESS)); + ASSERT_TRUE(stateTable.hget("SOME_KEY", "field", value)); + ASSERT_EQ(value, "value"); +} + +TEST(ResponsePublisher, TestPublishBuffered) +{ + DBConnector conn{"APPL_STATE_DB", 0}; + Table stateTable{&conn, "SOME_TABLE"}; + std::string value; + ResponsePublisher publisher{}; + + publisher.setBuffered(true); + + publisher.publish("SOME_TABLE", "SOME_KEY", {{"field", "value"}}, ReturnCode(SAI_STATUS_SUCCESS)); + publisher.flush(); + ASSERT_TRUE(stateTable.hget("SOME_KEY", "field", value)); + ASSERT_EQ(value, "value"); +} diff --git a/tests/mock_tests/test_failure_handling.cpp b/tests/mock_tests/test_failure_handling.cpp new file mode 100644 index 0000000000..be53e0fd5f --- /dev/null +++ b/tests/mock_tests/test_failure_handling.cpp @@ -0,0 +1,82 @@ +#include "saihelper.h" +#include "ut_helper.h" +#include + +extern sai_switch_api_t *sai_switch_api; + +namespace saifailure_test +{ + struct SaiFailureTest : public ::testing::Test + { + }; + uint32_t *_sai_syncd_notifications_count; + int32_t *_sai_syncd_notification_event; + sai_switch_api_t *pold_sai_switch_api; + sai_switch_api_t ut_sai_switch_api; + + sai_status_t _ut_stub_sai_set_switch_attribute( + _In_ sai_object_id_t switch_id, + _In_ const sai_attribute_t *attr) + { + if (attr[0].id == SAI_REDIS_SWITCH_ATTR_NOTIFY_SYNCD) + { + *_sai_syncd_notifications_count = *_sai_syncd_notifications_count + 1; + *_sai_syncd_notification_event = attr[0].value.s32; + } + return pold_sai_switch_api->set_switch_attribute(switch_id, attr); + } + + void _hook_sai_switch_api() + { + ut_sai_switch_api = *sai_switch_api; + pold_sai_switch_api = sai_switch_api; + ut_sai_switch_api.set_switch_attribute = _ut_stub_sai_set_switch_attribute; + sai_switch_api = &ut_sai_switch_api; + } + + void _unhook_sai_switch_api() + { + sai_switch_api = pold_sai_switch_api; + } + + TEST_F(SaiFailureTest, handleSaiFailure) + { + _hook_sai_switch_api(); + _sai_syncd_notifications_count = (uint32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + _sai_syncd_notification_event = (int32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + *_sai_syncd_notifications_count = 0; + uint32_t notif_count = *_sai_syncd_notifications_count; + + ASSERT_DEATH({handleSaiCreateStatus(SAI_API_FDB, SAI_STATUS_FAILURE);}, ""); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + + 
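+    // Note: with gtest's default (forking) death-test style, each
+    // ASSERT_DEATH statement runs in a child process, so ordinary globals
+    // mutated by the SAI stub would not be visible here afterwards. The
+    // counters are kept in an anonymous MAP_SHARED mapping (see the mmap()
+    // calls above) so the child's updates survive for the parent's
+    // ASSERT_EQ checks once the child aborts.
+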
ASSERT_DEATH({handleSaiCreateStatus(SAI_API_HOSTIF, SAI_STATUS_INVALID_PARAMETER);}, ""); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + + ASSERT_DEATH({handleSaiCreateStatus(SAI_API_PORT, SAI_STATUS_FAILURE);}, ""); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + + ASSERT_DEATH({handleSaiSetStatus(SAI_API_HOSTIF, SAI_STATUS_FAILURE);}, ""); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + + ASSERT_DEATH({handleSaiSetStatus(SAI_API_PORT, SAI_STATUS_FAILURE);}, ""); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + + ASSERT_DEATH({handleSaiSetStatus(SAI_API_TUNNEL, SAI_STATUS_FAILURE);}, ""); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + + ASSERT_DEATH({handleSaiRemoveStatus(SAI_API_LAG, SAI_STATUS_FAILURE);}, ""); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + + _unhook_sai_switch_api(); + } +} diff --git a/tests/mock_tests/warmrestartassist_ut.cpp b/tests/mock_tests/warmrestartassist_ut.cpp new file mode 100644 index 0000000000..6adcd08baf --- /dev/null +++ b/tests/mock_tests/warmrestartassist_ut.cpp @@ -0,0 +1,64 @@ +#define protected public +#include "orch.h" +#undef protected +#include "ut_helper.h" +//#include "mock_orchagent_main.h" +#include "mock_table.h" +#include "warm_restart.h" +#define private public +#include "warmRestartAssist.h" +#undef private + +#define APP_WRA_TEST_TABLE_NAME "TEST_TABLE" + +namespace warmrestartassist_test +{ + using namespace std; + + shared_ptr m_app_db = make_shared("APPL_DB", 0); + shared_ptr m_app_db_pipeline = make_shared(m_app_db.get()); + shared_ptr m_wra_test_table = make_shared(m_app_db.get(), APP_WRA_TEST_TABLE_NAME); + + AppRestartAssist *appRestartAssist; + + struct WarmrestartassistTest : public ::testing::Test + { + WarmrestartassistTest() + { + appRestartAssist = new AppRestartAssist(m_app_db_pipeline.get(), "testsyncd", "swss", 0); + appRestartAssist->m_warmStartInProgress = true; + appRestartAssist->registerAppTable(APP_WRA_TEST_TABLE_NAME, m_wra_test_table.get()); + } + + void SetUp() override + { + testing_db::reset(); + + Table testTable = Table(m_app_db.get(), APP_WRA_TEST_TABLE_NAME); + testTable.set("key", + { + {"field", "value0"}, + }); + } + + void TearDown() override + { + } + }; + + TEST_F(WarmrestartassistTest, warmRestartAssistTest) + { + appRestartAssist->readTablesToMap(); + vector fvVector; + fvVector.emplace_back("field", "value1"); + appRestartAssist->insertToMap(APP_WRA_TEST_TABLE_NAME, "key", fvVector, false); + appRestartAssist->insertToMap(APP_WRA_TEST_TABLE_NAME, "key", fvVector, false); + appRestartAssist->reconcile(); + + fvVector.clear(); + Table testTable = Table(m_app_db.get(), APP_WRA_TEST_TABLE_NAME); + ASSERT_TRUE(testTable.get("key", fvVector)); + ASSERT_EQ(fvField(fvVector[0]), "field"); + ASSERT_EQ(fvValue(fvVector[0]), "value1"); + } +} diff --git a/tests/test_bfd.py b/tests/test_bfd.py index 2feef60acb..3a087e2011 100644 --- a/tests/test_bfd.py +++ b/tests/test_bfd.py @@ -68,7 +68,7 @@ def test_addRemoveBfdSession(self, dvs): # Check 
STATE_DB entry related to the BFD session expected_sdb_values = {"state": "Down", "type": "async_active", "local_addr" : "10.0.0.1", "tx_interval" :"1000", - "rx_interval" : "1000", "multiplier" : "3", "multihop": "false"} + "rx_interval" : "1000", "multiplier" : "10", "multihop": "false", "local_discriminator" : "1"} self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) # Send BFD session state notification to update BFD session state @@ -108,7 +108,7 @@ def test_addRemoveBfdSession_ipv6(self, dvs): # Check STATE_DB entry related to the BFD session expected_sdb_values = {"state": "Down", "type": "async_active", "local_addr" : "2000::1", "tx_interval" :"1000", - "rx_interval" : "1000", "multiplier" : "3", "multihop": "false"} + "rx_interval" : "1000", "multiplier" : "10", "multihop": "false", "local_discriminator" : "2"} self.check_state_bfd_session_value("default|default|2000::2", expected_sdb_values) # Send BFD session state notification to update BFD session state @@ -150,7 +150,7 @@ def test_addRemoveBfdSession_interface(self, dvs): # Check STATE_DB entry related to the BFD session expected_sdb_values = {"state": "Down", "type": "async_active", "local_addr" : "10.0.0.1", "tx_interval" :"1000", - "rx_interval" : "1000", "multiplier" : "3", "multihop": "false"} + "rx_interval" : "1000", "multiplier" : "10", "multihop": "false", "local_discriminator" : "3"} self.check_state_bfd_session_value("default|Ethernet0|10.0.0.2", expected_sdb_values) # Send BFD session state notification to update BFD session state @@ -192,7 +192,7 @@ def test_addRemoveBfdSession_txrx_interval(self, dvs): # Check STATE_DB entry related to the BFD session expected_sdb_values = {"state": "Down", "type": "async_active", "local_addr" : "10.0.0.1", "tx_interval" :"300", - "rx_interval" : "500", "multiplier" : "3", "multihop": "false"} + "rx_interval" : "500", "multiplier" : "10", "multihop": "false", "local_discriminator" : "4"} self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) # Send BFD session state notification to update BFD session state @@ -233,7 +233,7 @@ def test_addRemoveBfdSession_multiplier(self, dvs): # Check STATE_DB entry related to the BFD session expected_sdb_values = {"state": "Down", "type": "async_active", "local_addr" : "10.0.0.1", "tx_interval" :"1000", - "rx_interval" : "1000", "multiplier" : "5", "multihop": "false"} + "rx_interval" : "1000", "multiplier" : "5", "multihop": "false", "local_discriminator" : "5"} self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) # Send BFD session state notification to update BFD session state @@ -274,7 +274,7 @@ def test_addRemoveBfdSession_multihop(self, dvs): # Check STATE_DB entry related to the BFD session expected_sdb_values = {"state": "Down", "type": "async_active", "local_addr" : "10.0.0.1", "tx_interval" :"1000", - "rx_interval" : "1000", "multiplier" : "3", "multihop": "true"} + "rx_interval" : "1000", "multiplier" : "10", "multihop": "true", "local_discriminator" : "6"} self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) # Send BFD session state notification to update BFD session state @@ -314,7 +314,7 @@ def test_addRemoveBfdSession_type(self, dvs): # Check STATE_DB entry related to the BFD session expected_sdb_values = {"state": "Down", "type": "demand_active", "local_addr" : "10.0.0.1", "tx_interval" :"1000", - "rx_interval" : "1000", "multiplier" : "3", "multihop": "false"} + "rx_interval" : "1000", "multiplier" : "10", 
"multihop": "false", "local_discriminator" : "7"} self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) # Send BFD session state notification to update BFD session state @@ -357,7 +357,7 @@ def test_multipleBfdSessions(self, dvs): # Check STATE_DB entry related to the BFD session 1 key_state_db1 = "default|default|10.0.0.2" expected_sdb_values1 = {"state": "Down", "type": "async_active", "local_addr" : "10.0.0.1", "tx_interval" :"1000", - "rx_interval" : "1000", "multiplier" : "3", "multihop": "false"} + "rx_interval" : "1000", "multiplier" : "10", "multihop": "false", "local_discriminator" : "8"} self.check_state_bfd_session_value(key_state_db1, expected_sdb_values1) # Create BFD session 2 @@ -385,7 +385,7 @@ def test_multipleBfdSessions(self, dvs): # Check STATE_DB entry related to the BFD session 2 key_state_db2 = "default|default|10.0.1.2" expected_sdb_values2 = {"state": "Down", "type": "async_active", "local_addr" : "10.0.0.1", "tx_interval" :"300", - "rx_interval" : "500", "multiplier" : "3", "multihop": "false"} + "rx_interval" : "500", "multiplier" : "10", "multihop": "false", "local_discriminator" : "9"} self.check_state_bfd_session_value(key_state_db2, expected_sdb_values2) # Create BFD session 3 @@ -411,7 +411,7 @@ def test_multipleBfdSessions(self, dvs): # Check STATE_DB entry related to the BFD session 3 key_state_db3 = "default|default|2000::2" expected_sdb_values3 = {"state": "Down", "type": "demand_active", "local_addr" : "2000::1", "tx_interval" :"1000", - "rx_interval" : "1000", "multiplier" : "3", "multihop": "false"} + "rx_interval" : "1000", "multiplier" : "10", "multihop": "false", "local_discriminator" : "10"} self.check_state_bfd_session_value(key_state_db3, expected_sdb_values3) # Create BFD session 4 @@ -437,7 +437,7 @@ def test_multipleBfdSessions(self, dvs): # Check STATE_DB entry related to the BFD session 4 key_state_db4 = "default|default|3000::2" expected_sdb_values4 = {"state": "Down", "type": "async_active", "local_addr" : "3000::1", "tx_interval" :"1000", - "rx_interval" : "1000", "multiplier" : "3", "multihop": "false"} + "rx_interval" : "1000", "multiplier" : "10", "multihop": "false", "local_discriminator" : "11"} self.check_state_bfd_session_value(key_state_db4, expected_sdb_values4) # Update BFD session states diff --git a/tests/test_evpn_tunnel.py b/tests/test_evpn_tunnel.py index b58944f7ce..21313a6c93 100644 --- a/tests/test_evpn_tunnel.py +++ b/tests/test_evpn_tunnel.py @@ -59,6 +59,9 @@ def test_p2p_tunnel(self, dvs, testlog): vnilist = ['1000', '1001', '1002'] vxlan_obj.fetch_exist_entries(dvs) + vxlan_obj.create_vlan1(dvs,"Vlan100") + vxlan_obj.create_vlan1(dvs,"Vlan101") + vxlan_obj.create_vlan1(dvs,"Vlan102") vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name_1, '1001', 'Vlan101') @@ -161,3 +164,46 @@ def test_p2mp_tunnel_with_dip(self, dvs, testlog): print("Testing SIP Tunnel Deletion") vxlan_obj.remove_vxlan_tunnel(dvs, tunnel_name) vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6') + + def test_delayed_vlan_vni_map(self, dvs, testlog): + vxlan_obj = self.get_vxlan_obj() + + tunnel_name = 'tunnel_2' + map_name = 'map_1000_100' + map_name_1 = 'map_1001_101' + vlanlist = ['100'] + vnilist = ['1000'] + + vxlan_obj.fetch_exist_entries(dvs) + vxlan_obj.create_vlan1(dvs,"Vlan100") + vxlan_obj.create_vlan1(dvs,"Vlan101") + + 
vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') + vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') + + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, tunnel_map_entry_count = 1) + vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) + + vxlan_obj.create_evpn_nvo(dvs, 'nvo1', tunnel_name) + + vxlan_obj.create_evpn_remote_vni(dvs, 'Vlan101', '7.7.7.7', '1001') + vxlan_obj.check_vxlan_dip_tunnel_not_created(dvs, tunnel_name, '6.6.6.6', '7.7.7.7') + vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name_1, '1001', 'Vlan101') + + print("Testing VLAN 101 extension") + vxlan_obj.check_vxlan_dip_tunnel(dvs, tunnel_name, '6.6.6.6', '7.7.7.7') + vxlan_obj.check_vlan_extension(dvs, '101', '7.7.7.7') + + print("Testing Vlan Extension removal") + vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan101', '7.7.7.7') + vxlan_obj.check_vlan_extension_delete(dvs, '101', '7.7.7.7') + vxlan_obj.check_vxlan_dip_tunnel_delete(dvs, '7.7.7.7') + + vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') + vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name_1, '1001', 'Vlan101') + vxlan_obj.check_vxlan_tunnel_map_entry_delete(dvs, tunnel_name, vlanlist, vnilist) + + print("Testing SIP Tunnel Deletion") + vxlan_obj.remove_evpn_nvo(dvs, 'nvo1') + vxlan_obj.remove_vxlan_tunnel(dvs, tunnel_name) + vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6') diff --git a/tests/test_evpn_tunnel_p2mp.py b/tests/test_evpn_tunnel_p2mp.py index 22f12f0beb..4b52d69e20 100644 --- a/tests/test_evpn_tunnel_p2mp.py +++ b/tests/test_evpn_tunnel_p2mp.py @@ -57,6 +57,9 @@ def test_vlan_extension(self, dvs, testlog): vnilist = ['1000', '1001', '1002'] vxlan_obj.fetch_exist_entries(dvs) + vxlan_obj.create_vlan1(dvs,"Vlan100") + vxlan_obj.create_vlan1(dvs,"Vlan101") + vxlan_obj.create_vlan1(dvs,"Vlan102") vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name_1, '1001', 'Vlan101') @@ -122,3 +125,44 @@ def test_vlan_extension(self, dvs, testlog): vxlan_obj.remove_evpn_nvo(dvs, 'nvo1') vxlan_obj.remove_vxlan_tunnel(dvs, tunnel_name) vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6', ignore_bp=False) + + def test_delayed_vlan_vni_map(self, dvs, testlog): + vxlan_obj = self.get_vxlan_obj() + + tunnel_name = 'tunnel_2' + map_name = 'map_1000_100' + map_name_1 = 'map_1001_101' + vlanlist = ['100'] + vnilist = ['1000'] + + vxlan_obj.fetch_exist_entries(dvs) + vxlan_obj.create_vlan1(dvs,"Vlan100") + vxlan_obj.create_vlan1(dvs,"Vlan101") + + vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') + vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') + + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, ignore_bp=False, tunnel_map_entry_count = 1) + vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) + + vxlan_obj.create_evpn_nvo(dvs, 'nvo1', tunnel_name) + + vxlan_obj.create_evpn_remote_vni(dvs, 'Vlan101', '7.7.7.7', '1001') + vxlan_obj.check_vlan_extension_not_created_p2mp(dvs, '101', '6.6.6.6', '7.7.7.7') + vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name_1, '1001', 'Vlan101') + + print("Testing VLAN 101 extension") + vxlan_obj.check_vlan_extension_p2mp(dvs, '101', '6.6.6.6', '7.7.7.7') + + print("Testing Vlan Extension removal") + 
vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan101', '7.7.7.7') + vxlan_obj.check_vlan_extension_delete_p2mp(dvs, '101', '6.6.6.6', '7.7.7.7') + + vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') + vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name_1, '1001', 'Vlan101') + vxlan_obj.check_vxlan_tunnel_map_entry_delete(dvs, tunnel_name, vlanlist, vnilist) + + print("Testing SIP Tunnel Deletion") + vxlan_obj.remove_evpn_nvo(dvs, 'nvo1') + vxlan_obj.remove_vxlan_tunnel(dvs, tunnel_name) + vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6', ignore_bp=False) diff --git a/tests/test_mux.py b/tests/test_mux.py index bd381a49bf..8313980130 100644 --- a/tests/test_mux.py +++ b/tests/test_mux.py @@ -537,6 +537,80 @@ def create_and_test_route(self, appdb, asicdb, dvs, dvs_route): ps._del(rtprefix) + def create_and_test_NH_routes(self, appdb, asicdb, dvs, dvs_route, mac): + ''' + Tests case where neighbor is removed in standby and added in active with route + ''' + nh_route = "2.2.2.0/24" + nh_route_ipv6 = "2023::/64" + neigh_ip = self.SERV1_IPV4 + neigh_ipv6 = self.SERV1_IPV6 + apdb = dvs.get_app_db() + + # Setup + self.set_mux_state(appdb, "Ethernet0", "active") + self.add_neighbor(dvs, neigh_ip, mac) + self.add_neighbor(dvs, neigh_ipv6, mac) + dvs.runcmd( + "vtysh -c \"configure terminal\" -c \"ip route " + nh_route + + " " + neigh_ip + "\"" + ) + dvs.runcmd( + "vtysh -c \"configure terminal\" -c \"ipv6 route " + nh_route_ipv6 + + " " + neigh_ipv6 + "\"" + ) + apdb.wait_for_entry("ROUTE_TABLE", nh_route) + apdb.wait_for_entry("ROUTE_TABLE", nh_route_ipv6) + + rtkeys = dvs_route.check_asicdb_route_entries([nh_route]) + rtkeys_ipv6 = dvs_route.check_asicdb_route_entries([nh_route_ipv6]) + self.check_nexthop_in_asic_db(asicdb, rtkeys[0]) + self.check_nexthop_in_asic_db(asicdb, rtkeys_ipv6[0]) + + # Set state to standby and delete neighbor + self.set_mux_state(appdb, "Ethernet0", "standby") + self.check_nexthop_in_asic_db(asicdb, rtkeys[0], True) + self.check_nexthop_in_asic_db(asicdb, rtkeys_ipv6[0], True) + + self.del_neighbor(dvs, neigh_ip) + self.del_neighbor(dvs, neigh_ipv6) + apdb.wait_for_deleted_entry(self.APP_NEIGH_TABLE, neigh_ip) + apdb.wait_for_deleted_entry(self.APP_NEIGH_TABLE, neigh_ipv6) + asicdb.wait_for_deleted_entry(self.ASIC_NEIGH_TABLE, neigh_ip) + asicdb.wait_for_deleted_entry(self.ASIC_NEIGH_TABLE, neigh_ip) + + self.check_nexthop_in_asic_db(asicdb, rtkeys[0], True) + self.check_nexthop_in_asic_db(asicdb, rtkeys_ipv6[0], True) + + # Set state to active, learn neighbor again + self.set_mux_state(appdb, "Ethernet0", "active") + + self.add_neighbor(dvs, neigh_ip, mac) + self.add_neighbor(dvs, neigh_ipv6, mac) + self.check_neigh_in_asic_db(asicdb, neigh_ip) + self.check_neigh_in_asic_db(asicdb, neigh_ipv6) + + self.check_nexthop_in_asic_db(asicdb, rtkeys[0]) + self.check_nexthop_in_asic_db(asicdb, rtkeys_ipv6[0]) + dvs.runcmd( + "ip neigh flush " + neigh_ip + ) + dvs.runcmd( + "ip neigh flush " + neigh_ipv6 + ) + + # Cleanup + dvs.runcmd( + "vtysh -c \"configure terminal\" -c \"no ip route " + nh_route + + " " + neigh_ip + "\"" + ) + dvs.runcmd( + "vtysh -c \"configure terminal\" -c \"no ipv6 route " + nh_route_ipv6 + + " " + neigh_ipv6 + "\"" + ) + self.del_neighbor(dvs, neigh_ip) + self.del_neighbor(dvs, neigh_ipv6) + def get_expected_sai_qualifiers(self, portlist, dvs_acl): expected_sai_qualifiers = { "SAI_ACL_ENTRY_ATTR_PRIORITY": self.ACL_PRIORITY, @@ -1099,6 +1173,14 @@ def test_Route(self, dvs, dvs_route, testlog): 
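create_and_test_NH_routes above drives FRR directly through vtysh; the pattern it repeats for route setup and teardown can be read as the small helper below (a sketch only, assuming the same dvs.runcmd plumbing; the helper name is illustrative):

    def add_static_route(dvs, prefix, nexthop, ipv6=False):
        # Inject a static route through FRR so fpmsyncd publishes it to
        # ROUTE_TABLE in APPL_DB; mirrors the vtysh calls in the test above.
        afi = "ipv6 route" if ipv6 else "ip route"
        dvs.runcmd(
            "vtysh -c \"configure terminal\" -c \"{} {} {}\"".format(afi, prefix, nexthop)
        )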
self.create_and_test_route(appdb, asicdb, dvs, dvs_route) + def test_NH(self, dvs, dvs_route, intf_fdb_map, setup_peer_switch, setup_tunnel, testlog): + """ test NH routes and mux state change """ + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + asicdb = dvs.get_asic_db() + mac = intf_fdb_map["Ethernet0"] + + self.create_and_test_NH_routes(appdb, asicdb, dvs, dvs_route, mac) + def test_acl(self, dvs, dvs_acl, testlog): """ test acl and mux state change """ diff --git a/tests/test_pbh.py b/tests/test_pbh.py index 270e59d429..65401a3ea9 100644 --- a/tests/test_pbh.py +++ b/tests/test_pbh.py @@ -130,6 +130,7 @@ def test_PbhTablePortChannelBinding(self, testlog): self.dvs_lag.get_and_verify_port_channel(0) +@pytest.mark.usefixtures("dvs_hash_manager") class TestPbhBasicFlows: def test_PbhHashFieldCreationDeletion(self, testlog): try: @@ -162,12 +163,12 @@ def test_PbhHashCreationDeletion(self, testlog): hash_name=PBH_HASH_NAME, hash_field_list=PBH_HASH_HASH_FIELD_LIST ) - self.dvs_pbh.verify_pbh_hash_count(1) + self.dvs_hash.verify_hash_count(1) finally: # PBH hash pbhlogger.info("Remove PBH hash: {}".format(PBH_HASH_NAME)) self.dvs_pbh.remove_pbh_hash(PBH_HASH_NAME) - self.dvs_pbh.verify_pbh_hash_count(0) + self.dvs_hash.verify_hash_count(0) # PBH hash field pbhlogger.info("Remove PBH hash field: {}".format(PBH_HASH_FIELD_NAME)) @@ -205,7 +206,7 @@ def test_PbhRuleCreationDeletion(self, testlog): hash_name=PBH_HASH_NAME, hash_field_list=PBH_HASH_HASH_FIELD_LIST ) - self.dvs_pbh.verify_pbh_hash_count(1) + self.dvs_hash.verify_hash_count(1) # PBH table pbhlogger.info("Create PBH table: {}".format(PBH_TABLE_NAME)) @@ -247,7 +248,7 @@ def test_PbhRuleCreationDeletion(self, testlog): # PBH hash pbhlogger.info("Remove PBH hash: {}".format(PBH_HASH_NAME)) self.dvs_pbh.remove_pbh_hash(PBH_HASH_NAME) - self.dvs_pbh.verify_pbh_hash_count(0) + self.dvs_hash.verify_hash_count(0) # PBH hash field pbhlogger.info("Remove PBH hash field: {}".format(PBH_HASH_FIELD_NAME)) @@ -255,6 +256,7 @@ def test_PbhRuleCreationDeletion(self, testlog): self.dvs_pbh.verify_pbh_hash_field_count(0) +@pytest.mark.usefixtures("dvs_hash_manager") class TestPbhBasicEditFlows: def test_PbhRuleUpdate(self, testlog): try: @@ -273,7 +275,7 @@ def test_PbhRuleUpdate(self, testlog): hash_name=PBH_HASH_NAME, hash_field_list=PBH_HASH_HASH_FIELD_LIST ) - self.dvs_pbh.verify_pbh_hash_count(1) + self.dvs_hash.verify_hash_count(1) # PBH table pbhlogger.info("Create PBH table: {}".format(PBH_TABLE_NAME)) @@ -319,7 +321,7 @@ def test_PbhRuleUpdate(self, testlog): flow_counter="ENABLED" ) - hash_id = self.dvs_pbh.get_pbh_hash_ids(1)[0] + hash_id = self.dvs_hash.get_hash_ids(1)[0] counter_id = self.dvs_acl.get_acl_counter_ids(1)[0] sai_attr_dict = { @@ -352,7 +354,7 @@ def test_PbhRuleUpdate(self, testlog): # PBH hash pbhlogger.info("Remove PBH hash: {}".format(PBH_HASH_NAME)) self.dvs_pbh.remove_pbh_hash(PBH_HASH_NAME) - self.dvs_pbh.verify_pbh_hash_count(0) + self.dvs_hash.verify_hash_count(0) # PBH hash field pbhlogger.info("Remove PBH hash field: {}".format(PBH_HASH_FIELD_NAME)) @@ -377,7 +379,7 @@ def test_PbhRuleUpdateFlowCounter(self, dvs, testlog): hash_name=PBH_HASH_NAME, hash_field_list=PBH_HASH_HASH_FIELD_LIST ) - self.dvs_pbh.verify_pbh_hash_count(1) + self.dvs_hash.verify_hash_count(1) # PBH table pbhlogger.info("Create PBH table: {}".format(PBH_TABLE_NAME)) @@ -463,7 +465,7 @@ def test_PbhRuleUpdateFlowCounter(self, dvs, testlog): # PBH hash pbhlogger.info("Remove PBH hash: {}".format(PBH_HASH_NAME)) 
self.dvs_pbh.remove_pbh_hash(PBH_HASH_NAME) - self.dvs_pbh.verify_pbh_hash_count(0) + self.dvs_hash.verify_hash_count(0) # PBH hash field pbhlogger.info("Remove PBH hash field: {}".format(PBH_HASH_FIELD_NAME)) @@ -475,6 +477,7 @@ def test_PbhRuleUpdateFlowCounter(self, dvs, testlog): test_flex_counters.post_trap_flow_counter_test(meta_data) +@pytest.mark.usefixtures("dvs_hash_manager") @pytest.mark.usefixtures("dvs_lag_manager") class TestPbhExtendedFlows: class PbhRefCountHelper(object): @@ -596,13 +599,13 @@ def create_hash(self, meta_dict, pbh_ref_count): hash_field_list=meta_dict["hash_field_list"] ) pbh_ref_count.incPbhHashCount() - self.dvs_pbh.verify_pbh_hash_count(pbh_ref_count.getPbhHashCount()) + self.dvs_hash.verify_hash_count(pbh_ref_count.getPbhHashCount()) def remove_hash(self, meta_dict, pbh_ref_count): pbhlogger.info("Remove PBH hash: {}".format(meta_dict["name"])) self.dvs_pbh.remove_pbh_hash(meta_dict["name"]) pbh_ref_count.decPbhHashCount() - self.dvs_pbh.verify_pbh_hash_count(pbh_ref_count.getPbhHashCount()) + self.dvs_hash.verify_hash_count(pbh_ref_count.getPbhHashCount()) def create_table(self, meta_dict, pbh_ref_count): pbhlogger.info("Create PBH table: {}".format(meta_dict["name"])) @@ -909,6 +912,7 @@ def test_PbhNvgreVxlanConfiguration(self, testlog, pbh_nvgre, pbh_vxlan): pass +@pytest.mark.usefixtures("dvs_hash_manager") class TestPbhDependencyFlows: def test_PbhHashCreationDeletionWithDependencies(self, testlog): try: @@ -918,7 +922,7 @@ def test_PbhHashCreationDeletionWithDependencies(self, testlog): hash_name=PBH_HASH_NAME, hash_field_list=PBH_HASH_HASH_FIELD_LIST ) - self.dvs_pbh.verify_pbh_hash_count(0) + self.dvs_hash.verify_hash_count(0) # PBH hash field pbhlogger.info("Create PBH hash field: {}".format(PBH_HASH_FIELD_NAME)) @@ -928,7 +932,7 @@ def test_PbhHashCreationDeletionWithDependencies(self, testlog): sequence_id=PBH_HASH_FIELD_SEQUENCE_ID ) self.dvs_pbh.verify_pbh_hash_field_count(1) - self.dvs_pbh.verify_pbh_hash_count(1) + self.dvs_hash.verify_hash_count(1) finally: # PBH hash field pbhlogger.info("Remove PBH hash field: {}".format(PBH_HASH_FIELD_NAME)) @@ -938,7 +942,7 @@ def test_PbhHashCreationDeletionWithDependencies(self, testlog): # PBH hash pbhlogger.info("Remove PBH hash: {}".format(PBH_HASH_NAME)) self.dvs_pbh.remove_pbh_hash(PBH_HASH_NAME) - self.dvs_pbh.verify_pbh_hash_count(0) + self.dvs_hash.verify_hash_count(0) self.dvs_pbh.verify_pbh_hash_field_count(0) def test_PbhRuleCreationDeletionWithDependencies(self, testlog): @@ -949,7 +953,7 @@ def test_PbhRuleCreationDeletionWithDependencies(self, testlog): hash_name=PBH_HASH_NAME, hash_field_list=PBH_HASH_HASH_FIELD_LIST ) - self.dvs_pbh.verify_pbh_hash_count(0) + self.dvs_hash.verify_hash_count(0) # PBH hash field pbhlogger.info("Create PBH hash field: {}".format(PBH_HASH_FIELD_NAME)) @@ -959,7 +963,7 @@ def test_PbhRuleCreationDeletionWithDependencies(self, testlog): sequence_id=PBH_HASH_FIELD_SEQUENCE_ID ) self.dvs_pbh.verify_pbh_hash_field_count(1) - self.dvs_pbh.verify_pbh_hash_count(1) + self.dvs_hash.verify_hash_count(1) # PBH rule attr_dict = { @@ -1009,7 +1013,7 @@ def test_PbhRuleCreationDeletionWithDependencies(self, testlog): # PBH hash pbhlogger.info("Remove PBH hash: {}".format(PBH_HASH_NAME)) self.dvs_pbh.remove_pbh_hash(PBH_HASH_NAME) - self.dvs_pbh.verify_pbh_hash_count(0) + self.dvs_hash.verify_hash_count(0) self.dvs_pbh.verify_pbh_hash_field_count(0) diff --git a/tests/test_port_an.py b/tests/test_port_an.py index e8956aee2a..5356d2e837 100644 --- 
a/tests/test_port_an.py +++ b/tests/test_port_an.py @@ -311,6 +311,48 @@ def test_PortAutoNegRemoteAdvSpeeds(self, dvs, testlog): assert status == True assert "rmt_adv_speeds" in [fv[0] for fv in fvs] + def test_PortAdvWithoutAutoneg(self, dvs, testlog): + + db = swsscommon.DBConnector(0, dvs.redis_sock, 0) + cdb = swsscommon.DBConnector(4, dvs.redis_sock, 0) + sdb = swsscommon.DBConnector(6, dvs.redis_sock, 0) + + tbl = swsscommon.ProducerStateTable(db, "PORT_TABLE") + ctbl = swsscommon.Table(cdb, "PORT") + stbl = swsscommon.Table(sdb, "PORT_TABLE") + + # set autoneg = off + fvs = swsscommon.FieldValuePairs([("autoneg", "off")]) + ctbl.set("Ethernet0", fvs) + + time.sleep(1) + fvs = swsscommon.FieldValuePairs([("adv_speeds", "100,1000"), + ("adv_interface_types", "CR2,CR4")]) + ctbl.set("Ethernet0", fvs) + + time.sleep(1) + + adb = swsscommon.DBConnector(1, dvs.redis_sock, 0) + + atbl = swsscommon.Table(adb, "ASIC_STATE:SAI_OBJECT_TYPE_PORT") + (status, fvs) = atbl.get(dvs.asicdb.portnamemap["Ethernet0"]) + assert status == True + + assert "SAI_PORT_ATTR_AUTO_NEG_MODE" in [fv[0] for fv in fvs] + assert "SAI_PORT_ATTR_ADVERTISED_SPEED" in [fv[0] for fv in fvs] + assert "SAI_PORT_ATTR_ADVERTISED_INTERFACE_TYPE" in [fv[0] for fv in fvs] + for fv in fvs: + if fv[0] == "SAI_PORT_ATTR_AUTO_NEG_MODE": + assert fv[1] == "false" + elif fv[0] == "SAI_PORT_ATTR_ADVERTISED_SPEED": + assert fv[1] == "2:100,1000" + elif fv[0] == "SAI_PORT_ATTR_ADVERTISED_INTERFACE_TYPE": + assert fv[1] == "2:SAI_PORT_INTERFACE_TYPE_CR2,SAI_PORT_INTERFACE_TYPE_CR4" + + # set admin up + cfvs = swsscommon.FieldValuePairs([("admin_status", "up")]) + ctbl.set("Ethernet0", cfvs) + # Add dummy always-pass test at the end as a workaround # for an issue where, if Flaky fails on the final test, it invokes module tear-down before retrying def test_nonflaky_dummy(): diff --git a/tests/test_storm_control.py b/tests/test_storm_control.py index 76deef9268..ec4da04917 100644 --- a/tests/test_storm_control.py +++ b/tests/test_storm_control.py @@ -211,7 +211,7 @@ def test_warm_restart_all_interfaces(self,dvs,testlog): dvs.warm_restart_swss("true") # freeze orchagent for warm restart - (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check") + (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check", include_stderr=False) assert result == "RESTARTCHECK succeeded\n" time.sleep(2) diff --git a/tests/test_virtual_chassis.py b/tests/test_virtual_chassis.py index e70b495b62..1dae2b68ff 100644 --- a/tests/test_virtual_chassis.py +++ b/tests/test_virtual_chassis.py @@ -3,6 +3,7 @@ import ast import time import pytest +import buffer_model class TestVirtualChassis(object): @@ -852,7 +853,66 @@ def test_chassis_system_lag_id_allocator_del_id(self, vct): assert len(lagmemberkeys) == 0, "Stale system lag member entries in asic db" break - + + def test_chassis_add_remove_ports(self, vct): + """Test removing and adding a port in a VOQ chassis. + + Test validates that when a port is created, it is removed from the default VLAN.
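+ The port's BUFFER_PG and BUFFER_QUEUE entries are removed before the port itself, so no buffer configuration still references the port at deletion time, and the dynamic buffer model is enabled for the duration of the test.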
+ """ + dvss = vct.dvss + for name in dvss.keys(): + dvs = dvss[name] + buffer_model.enable_dynamic_buffer(dvs.get_config_db(), dvs.runcmd) + + config_db = dvs.get_config_db() + app_db = dvs.get_app_db() + asic_db = dvs.get_asic_db() + metatbl = config_db.get_entry("DEVICE_METADATA", "localhost") + cfg_switch_type = metatbl.get("switch_type") + + if cfg_switch_type == "voq": + num_ports = len(asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT")) + # Get the port info we'll flap + port = config_db.get_keys('PORT')[0] + port_info = config_db.get_entry("PORT", port) + + # Remove port's other configs + pgs = config_db.get_keys('BUFFER_PG') + queues = config_db.get_keys('BUFFER_QUEUE') + for key in pgs: + if port in key: + config_db.delete_entry('BUFFER_PG', key) + app_db.wait_for_deleted_entry('BUFFER_PG_TABLE', key) + + for key in queues: + if port in key: + config_db.delete_entry('BUFFER_QUEUE', key) + app_db.wait_for_deleted_entry('BUFFER_QUEUE_TABLE', key) + + # Remove port + config_db.delete_entry('PORT', port) + app_db.wait_for_deleted_entry('PORT_TABLE', port) + num = asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", + num_ports-1) + assert len(num) == num_ports-1 + + marker = dvs.add_log_marker() + + # Create port + config_db.update_entry("PORT", port, port_info) + app_db.wait_for_entry("PORT_TABLE", port) + num = asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", + num_ports) + assert len(num) == num_ports + + # Check that we see the logs for removing default vlan + matching_log = "removeDefaultVlanMembers: Remove 0 VLAN members from default VLAN" + _, logSeen = dvs.runcmd( [ "sh", "-c", + "awk '/{}/,ENDFILE {{print;}}' /var/log/syslog | grep '{}' | wc -l".format( marker, matching_log ) ] ) + assert logSeen.strip() == "1" + + buffer_model.disable_dynamic_buffer(dvs.get_config_db(), dvs.runcmd) + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying def test_nonflaky_dummy(): diff --git a/tests/test_vnet.py b/tests/test_vnet.py index 9aba590ec1..8a83d59925 100644 --- a/tests/test_vnet.py +++ b/tests/test_vnet.py @@ -140,11 +140,11 @@ def delete_vnet_local_routes(dvs, prefix, vnet_name): time.sleep(2) -def create_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor="", profile=""): - set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac=mac, vni=vni, ep_monitor=ep_monitor, profile=profile) +def create_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor="", profile="", primary="", monitoring="", adv_prefix=""): + set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac=mac, vni=vni, ep_monitor=ep_monitor, profile=profile, primary=primary, monitoring=monitoring, adv_prefix=adv_prefix) -def set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor="", profile=""): +def set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor="", profile="", primary="", monitoring="", adv_prefix=""): conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) attrs = [ @@ -163,6 +163,15 @@ def set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor= if profile: attrs.append(('profile', profile)) + if primary: + attrs.append(('primary', primary)) + + if monitoring: + attrs.append(('monitoring', monitoring)) + + if adv_prefix: + attrs.append(('adv_prefix', adv_prefix)) + tbl = swsscommon.Table(conf_db, "VNET_ROUTE_TUNNEL") fvs = swsscommon.FieldValuePairs(attrs) tbl.set("%s|%s" % (vnet_name, prefix), fvs) @@ 
-317,7 +326,7 @@ def delete_phy_interface(dvs, ifname, ipaddr): time.sleep(2) -def create_vnet_entry(dvs, name, tunnel, vni, peer_list, scope="", advertise_prefix=False): +def create_vnet_entry(dvs, name, tunnel, vni, peer_list, scope="", advertise_prefix=False, overlay_dmac=""): conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) @@ -333,6 +342,9 @@ def create_vnet_entry(dvs, name, tunnel, vni, peer_list, scope="", advertise_pre if advertise_prefix: attrs.append(('advertise_prefix', 'true')) + if overlay_dmac: + attrs.append(('overlay_dmac', overlay_dmac)) + # create the VXLAN tunnel Term entry in Config DB create_entry_tbl( conf_db, @@ -534,6 +546,7 @@ class VnetVxlanVrfTunnel(object): ASIC_NEXT_HOP_GROUP = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP" ASIC_NEXT_HOP_GROUP_MEMBER = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER" ASIC_BFD_SESSION = "ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION" + APP_VNET_MONITOR = "VNET_MONITOR_TABLE" def __init__(self): self.tunnel_map_ids = set() @@ -948,7 +961,21 @@ def _access_function(): return True - + def check_custom_monitor_app_db(self, dvs, prefix, endpoint, packet_type, overlay_dmac): + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + key = prefix + ':' + endpoint + check_object(app_db, self.APP_VNET_MONITOR, key, + { + "packet_type": packet_type, + "overlay_dmac" : overlay_dmac + } + ) + return True + def check_custom_monitor_deleted(self, dvs, prefix, endpoint): + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + key = prefix + ':' + endpoint + check_deleted_object(app_db, self.APP_VNET_MONITOR, key) + class TestVnetOrch(object): def get_vnet_obj(self): @@ -2420,6 +2447,37 @@ def test_vnet_orch_17(self, dvs, testlog): delete_vnet_entry(dvs, 'Vnet17') vnet_obj.check_del_vnet_entry(dvs, 'Vnet17') + ''' + Test 18 - Test for vxlan custom monitoring config. 
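+ Creates Vnet18 with an overlay_dmac, adds a tunnel route with three endpoints, a primary endpoint, and monitoring='custom', then verifies that a VNET_MONITOR_TABLE entry carrying packet_type and overlay_dmac appears in APPL_DB for each monitored endpoint and is removed when the route is deleted.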
+ ''' + def test_vnet_orch_18(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_18' + + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + create_vnet_entry(dvs, 'Vnet18', tunnel_name, '10009', "", overlay_dmac="22:33:33:44:44:66") + + vnet_obj.check_vnet_entry(dvs, 'Vnet18') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet18', '10009') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet18', '9.0.0.1,9.0.0.2,9.0.0.3', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3', primary='9.0.0.1', monitoring='custom', adv_prefix='100.100.1.1/27') + + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.1/32", "9.1.0.1", "vxlan", "22:33:33:44:44:66") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.1/32", "9.1.0.2", "vxlan", "22:33:33:44:44:66") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.1/32", "9.1.0.3", "vxlan", "22:33:33:44:44:66") + + delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet18') + + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.1") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.2") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.3") + # Add dummy always-pass test at the end as a workaround # for an issue where, if Flaky fails on the final test, it invokes module tear-down before retrying def test_nonflaky_dummy(): diff --git a/warmrestart/warmRestartAssist.cpp b/warmrestart/warmRestartAssist.cpp index 988f8279db..9b1a8dfddd 100644 --- a/warmrestart/warmRestartAssist.cpp +++ b/warmrestart/warmRestartAssist.cpp @@ -208,10 +208,31 @@ void AppRestartAssist::insertToMap(string tableName, string key, vector<FieldValueTuple> fvVector, bool delete_key) - SWSS_LOG_INFO("%s, found key: %s, same value", tableName.c_str(), key.c_str()); - - // mark as SAME flag - setCacheEntryState(found->second, SAME); + auto state = getCacheEntryState(found->second); + /* + * If an entry is updated more than once with the same value, but that value differs from the stored one, + * keep the state as NEW. + * E.g. + * Assume the entry's value restored from the last warm reboot is V0. + * 1. The first update with value V1 is received and handled by the `if (found != appTableCacheMap[tableName].end())` branch above: + * - state is set to NEW + * - value is updated to V1 + * 2. The second update with the same value V1 is received and handled by this branch: + * - Originally, the state was set to SAME, which is wrong because V1 differs from the stored value V0 + * - The correct logic is: set the state to SAME only if the state is not NEW + * This is a very rare case, because most of the time an entry is not updated multiple times + */ + if (state == NEW) + { + SWSS_LOG_NOTICE("%s, found key: %s, it has been updated for the second time, keep state as NEW", + tableName.c_str(), key.c_str()); + } + else + { + SWSS_LOG_INFO("%s, found key: %s, same value", tableName.c_str(), key.c_str()); + // mark as SAME flag + setCacheEntryState(found->second, SAME); + } } } else
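To make the state-transition subtlety described in the comment above concrete, here is a minimal Python model of the cache update (an illustrative sketch only: the dict layout and state names are assumptions mirroring the C++ enum, not the AppRestartAssist API):

# Cache as restored from the last warm reboot: key -> {value, state}
cache = {"ROUTE_TABLE:1.1.1.0/24": {"value": "V0", "state": "STALE"}}

def insert_to_map(key, value):
    entry = cache.get(key)
    if entry is None:
        cache[key] = {"value": value, "state": "NEW"}
    elif value != entry["value"]:
        # The update differs from the stored value: remember it and mark NEW
        entry["value"] = value
        entry["state"] = "NEW"
    elif entry["state"] != "NEW":
        # Same value as stored and never overwritten: genuinely unchanged
        entry["state"] = "SAME"
    # else: a repeated update with the same new value (V0 -> V1, then V1 again);
    # keep NEW, because the entry still differs from the restored value V0

insert_to_map("ROUTE_TABLE:1.1.1.0/24", "V1")  # state -> NEW
insert_to_map("ROUTE_TABLE:1.1.1.0/24", "V1")  # state stays NEW (the case handled above)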