From d8fadc6cd4e3f898dd836ec5c528c4ca406122c8 Mon Sep 17 00:00:00 2001
From: Stephen Sun <5379172+stephenxs@users.noreply.github.com>
Date: Mon, 18 Apr 2022 14:02:18 +0800
Subject: [PATCH] [QoS] Resolve an issue in the sequence where a referenced object is removed and then the referencing object is deleted and re-added (#2210)

- What I did
Resolve an issue in the following scenario.
Suppose object a in table A references object b in table B. When a user removes entries from both tables, the notifications can be received in the following order:
1. the notification removing object b
2. the notification removing object a
3. the notification re-adding object a
4. the notification re-adding object b
Object b cannot be removed in step 1 because it is still referenced by object a. If the system is busy, the notification removing a can still be sitting in m_toSync when the notification re-adding it arrives; the two notifications are then handled together and the reference to object b is never cleared. As a result, the notification removing b is never handled and stays in m_toSync forever.
Solution: introduce a pending-remove flag (m_pendingRemove) indicating that an object is about to be removed but is blocked by an outstanding reference.
- The flag is set when a DEL notification is received for an object whose reference count is non-zero.
- When references are resolved in step 3, a pending-remove object is skipped and the notification remains in m_toSync.
- A SET operation is not carried out (it returns task_need_retry) while the object to be set carries the pending-remove flag.
By doing so, when object a is re-added in step 3 it cannot resolve the dependent object b, so the DEL notification from step 1 is eventually handled and drained. A standalone sketch of this retry pattern is appended after the diff for reference.

- Why I did it
Fix the bug described above.

- How I verified it
Mock test and manual test (e.g.
config qos reload) Signed-off-by: Stephen Sun --- orchagent/bufferorch.cpp | 14 + orchagent/orch.cpp | 5 + orchagent/orch.h | 1 + orchagent/qosorch.cpp | 86 +----- orchagent/qosorch.h | 3 - tests/mock_tests/Makefile.am | 1 + tests/mock_tests/bufferorch_ut.cpp | 409 +++++++++++++++++++++++++++++ tests/mock_tests/qosorch_ut.cpp | 170 ++++++++++++ 8 files changed, 614 insertions(+), 75 deletions(-) create mode 100644 tests/mock_tests/bufferorch_ut.cpp diff --git a/orchagent/bufferorch.cpp b/orchagent/bufferorch.cpp index 56f02b2b52f4..f9b91e7a16cb 100644 --- a/orchagent/bufferorch.cpp +++ b/orchagent/bufferorch.cpp @@ -389,6 +389,11 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) { sai_object = (*(m_buffer_type_maps[map_type_name]))[object_name].m_saiObjectId; SWSS_LOG_DEBUG("found existing object:%s of type:%s", object_name.c_str(), map_type_name.c_str()); + if ((*(m_buffer_type_maps[map_type_name]))[object_name].m_pendingRemove && op == SET_COMMAND) + { + SWSS_LOG_NOTICE("Entry %s %s is pending remove, need retry", map_type_name.c_str(), object_name.c_str()); + return task_process_status::task_need_retry; + } } SWSS_LOG_DEBUG("processing command:%s", op.c_str()); if (object_name == "ingress_zero_pool") @@ -565,6 +570,7 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) } (*(m_buffer_type_maps[map_type_name]))[object_name].m_saiObjectId = sai_object; + (*(m_buffer_type_maps[map_type_name]))[object_name].m_pendingRemove = false; // Here we take the PFC watchdog approach to update the COUNTERS_DB metadata (e.g., PFC_WD_DETECTION_TIME per queue) // at initialization (creation and registration phase) // Specifically, we push the buffer pool name to oid mapping upon the creation of the oid @@ -579,6 +585,7 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) { auto hint = objectReferenceInfo(m_buffer_type_maps, map_type_name, object_name); SWSS_LOG_NOTICE("Can't remove object %s due to being referenced (%s)", object_name.c_str(), hint.c_str()); + (*(m_buffer_type_maps[map_type_name]))[object_name].m_pendingRemove = true; return task_process_status::task_need_retry; } @@ -647,6 +654,11 @@ task_process_status BufferOrch::processBufferProfile(KeyOpFieldsValuesTuple &tup { sai_object = (*(m_buffer_type_maps[map_type_name]))[object_name].m_saiObjectId; SWSS_LOG_DEBUG("found existing object:%s of type:%s", object_name.c_str(), map_type_name.c_str()); + if ((*(m_buffer_type_maps[map_type_name]))[object_name].m_pendingRemove && op == SET_COMMAND) + { + SWSS_LOG_NOTICE("Entry %s %s is pending remove, need retry", map_type_name.c_str(), object_name.c_str()); + return task_process_status::task_need_retry; + } } SWSS_LOG_DEBUG("processing command:%s", op.c_str()); if (op == SET_COMMAND) @@ -787,6 +799,7 @@ task_process_status BufferOrch::processBufferProfile(KeyOpFieldsValuesTuple &tup } } (*(m_buffer_type_maps[map_type_name]))[object_name].m_saiObjectId = sai_object; + (*(m_buffer_type_maps[map_type_name]))[object_name].m_pendingRemove = false; SWSS_LOG_NOTICE("Created buffer profile %s with type %s", object_name.c_str(), map_type_name.c_str()); } @@ -799,6 +812,7 @@ task_process_status BufferOrch::processBufferProfile(KeyOpFieldsValuesTuple &tup { auto hint = objectReferenceInfo(m_buffer_type_maps, map_type_name, object_name); SWSS_LOG_NOTICE("Can't remove object %s due to being referenced (%s)", object_name.c_str(), hint.c_str()); + (*(m_buffer_type_maps[map_type_name]))[object_name].m_pendingRemove = true; 
return task_process_status::task_need_retry; } diff --git a/orchagent/orch.cpp b/orchagent/orch.cpp index a9c5c9afcb97..f04a438a5da2 100644 --- a/orchagent/orch.cpp +++ b/orchagent/orch.cpp @@ -357,6 +357,11 @@ bool Orch::parseReference(type_map &type_maps, string &ref_in, const string &typ SWSS_LOG_INFO("map:%s does not contain object with name:%s\n", type_name.c_str(), ref_in.c_str()); return false; } + if (obj_it->second.m_pendingRemove) + { + SWSS_LOG_NOTICE("map:%s contains a pending removed object %s, skip\n", type_name.c_str(), ref_in.c_str()); + return false; + } object_name = ref_in; SWSS_LOG_DEBUG("parsed: type_name:%s, object_name:%s", type_name.c_str(), object_name.c_str()); return true; diff --git a/orchagent/orch.h b/orchagent/orch.h index f6b13aa9c684..a808fef32684 100644 --- a/orchagent/orch.h +++ b/orchagent/orch.h @@ -66,6 +66,7 @@ typedef struct // multiple objects being referenced are separated by ',' std::map m_objsReferencingByMe; sai_object_id_t m_saiObjectId; + bool m_pendingRemove; } referenced_object; typedef std::map object_reference_map; diff --git a/orchagent/qosorch.cpp b/orchagent/qosorch.cpp index 616c08f1ae39..844ae769f7ed 100644 --- a/orchagent/qosorch.cpp +++ b/orchagent/qosorch.cpp @@ -110,6 +110,11 @@ task_process_status QosMapHandler::processWorkItem(Consumer& consumer, KeyOpFiel if (QosOrch::getTypeMap()[qos_map_type_name]->find(qos_object_name) != QosOrch::getTypeMap()[qos_map_type_name]->end()) { sai_object = (*(QosOrch::getTypeMap()[qos_map_type_name]))[qos_object_name].m_saiObjectId; + if ((*(QosOrch::getTypeMap()[qos_map_type_name]))[qos_object_name].m_pendingRemove && op == SET_COMMAND) + { + SWSS_LOG_NOTICE("Entry %s %s is pending remove, need retry", qos_map_type_name.c_str(), qos_object_name.c_str()); + return task_process_status::task_need_retry; + } } if (op == SET_COMMAND) { @@ -138,6 +143,7 @@ task_process_status QosMapHandler::processWorkItem(Consumer& consumer, KeyOpFiel return task_process_status::task_failed; } (*(QosOrch::getTypeMap()[qos_map_type_name]))[qos_object_name].m_saiObjectId = sai_object; + (*(QosOrch::getTypeMap()[qos_map_type_name]))[qos_object_name].m_pendingRemove = false; SWSS_LOG_NOTICE("Created [%s:%s]", qos_map_type_name.c_str(), qos_object_name.c_str()); } freeAttribResources(attributes); @@ -153,6 +159,7 @@ task_process_status QosMapHandler::processWorkItem(Consumer& consumer, KeyOpFiel { auto hint = gQosOrch->objectReferenceInfo(QosOrch::getTypeMap(), qos_map_type_name, qos_object_name); SWSS_LOG_NOTICE("Can't remove object %s due to being referenced (%s)", qos_object_name.c_str(), hint.c_str()); + (*(QosOrch::getTypeMap()[qos_map_type_name]))[qos_object_name].m_pendingRemove = true; return task_process_status::task_need_retry; } if (!removeQosItem(sai_object)) @@ -1121,6 +1128,11 @@ task_process_status QosOrch::handleSchedulerTable(Consumer& consumer, KeyOpField SWSS_LOG_ERROR("Error sai_object must exist for key %s", qos_object_name.c_str()); return task_process_status::task_invalid_entry; } + if ((*(m_qos_maps[qos_map_type_name]))[qos_object_name].m_pendingRemove && op == SET_COMMAND) + { + SWSS_LOG_NOTICE("Entry %s %s is pending remove, need retry", qos_map_type_name.c_str(), qos_object_name.c_str()); + return task_process_status::task_need_retry; + } } if (op == SET_COMMAND) { @@ -1228,6 +1240,7 @@ task_process_status QosOrch::handleSchedulerTable(Consumer& consumer, KeyOpField } SWSS_LOG_NOTICE("Created [%s:%s]", qos_map_type_name.c_str(), qos_object_name.c_str()); 
(*(m_qos_maps[qos_map_type_name]))[qos_object_name].m_saiObjectId = sai_object; + (*(m_qos_maps[qos_map_type_name]))[qos_object_name].m_pendingRemove = false; } } else if (op == DEL_COMMAND) @@ -1241,6 +1254,7 @@ task_process_status QosOrch::handleSchedulerTable(Consumer& consumer, KeyOpField { auto hint = gQosOrch->objectReferenceInfo(QosOrch::getTypeMap(), qos_map_type_name, qos_object_name); SWSS_LOG_NOTICE("Can't remove object %s due to being referenced (%s)", qos_object_name.c_str(), hint.c_str()); + (*(m_qos_maps[qos_map_type_name]))[qos_object_name].m_pendingRemove = true; return task_process_status::task_need_retry; } sai_status = sai_scheduler_api->remove_scheduler(sai_object); @@ -1613,78 +1627,6 @@ task_process_status QosOrch::handleQueueTable(Consumer& consumer, KeyOpFieldsVal return task_process_status::task_success; } -bool QosOrch::applyMapToPort(Port &port, sai_attr_id_t attr_id, sai_object_id_t map_id) -{ - SWSS_LOG_ENTER(); - - sai_attribute_t attr; - attr.id = attr_id; - attr.value.oid = map_id; - - sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &attr); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed setting sai object:%" PRIx64 " for port:%s, status:%d", map_id, port.m_alias.c_str(), status); - task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); - if (handle_status != task_success) - { - return parseHandleSaiStatusFailure(handle_status); - } - } - return true; -} - -task_process_status QosOrch::ResolveMapAndApplyToPort( - Port &port, - sai_port_attr_t port_attr, - string field_name, - KeyOpFieldsValuesTuple &tuple, - string op) -{ - SWSS_LOG_ENTER(); - - sai_object_id_t sai_object = SAI_NULL_OBJECT_ID; - string object_name; - bool result; - ref_resolve_status resolve_result = resolveFieldRefValue(m_qos_maps, field_name, - qos_to_ref_table_map.at(field_name), tuple, sai_object, object_name); - if (ref_resolve_status::success == resolve_result) - { - if (op == SET_COMMAND) - { - result = applyMapToPort(port, port_attr, sai_object); - } - else if (op == DEL_COMMAND) - { - // NOTE: The map is un-bound from the port. But the map itself still exists. 
- result = applyMapToPort(port, port_attr, SAI_NULL_OBJECT_ID); - } - else - { - SWSS_LOG_ERROR("Unknown operation type %s", op.c_str()); - return task_process_status::task_invalid_entry; - } - if (!result) - { - SWSS_LOG_ERROR("Failed setting field:%s to port:%s, line:%d", field_name.c_str(), port.m_alias.c_str(), __LINE__); - return task_process_status::task_failed; - } - SWSS_LOG_DEBUG("Applied field:%s to port:%s, line:%d", field_name.c_str(), port.m_alias.c_str(), __LINE__); - return task_process_status::task_success; - } - else if (resolve_result != ref_resolve_status::field_not_found) - { - if(ref_resolve_status::not_resolved == resolve_result) - { - SWSS_LOG_INFO("Missing or invalid %s reference", field_name.c_str()); - return task_process_status::task_need_retry; - } - SWSS_LOG_ERROR("Resolving %s reference failed", field_name.c_str()); - return task_process_status::task_failed; - } - return task_process_status::task_success; -} - task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple) { SWSS_LOG_ENTER(); diff --git a/orchagent/qosorch.h b/orchagent/qosorch.h index 0401cc6e6f50..053c461886bb 100644 --- a/orchagent/qosorch.h +++ b/orchagent/qosorch.h @@ -180,11 +180,8 @@ class QosOrch : public Orch sai_object_id_t getSchedulerGroup(const Port &port, const sai_object_id_t queue_id); - bool applyMapToPort(Port &port, sai_attr_id_t attr_id, sai_object_id_t sai_dscp_to_tc_map); bool applySchedulerToQueueSchedulerGroup(Port &port, size_t queue_ind, sai_object_id_t scheduler_profile_id); bool applyWredProfileToQueue(Port &port, size_t queue_ind, sai_object_id_t sai_wred_profile); - task_process_status ResolveMapAndApplyToPort(Port &port,sai_port_attr_t port_attr, - string field_name, KeyOpFieldsValuesTuple &tuple, string op); private: qos_table_handler_map m_qos_handler_map; diff --git a/tests/mock_tests/Makefile.am b/tests/mock_tests/Makefile.am index 15bc47bd700d..42b9f17110f4 100644 --- a/tests/mock_tests/Makefile.am +++ b/tests/mock_tests/Makefile.am @@ -25,6 +25,7 @@ tests_SOURCES = aclorch_ut.cpp \ portsorch_ut.cpp \ routeorch_ut.cpp \ qosorch_ut.cpp \ + bufferorch_ut.cpp \ saispy_ut.cpp \ consumer_ut.cpp \ ut_saihelper.cpp \ diff --git a/tests/mock_tests/bufferorch_ut.cpp b/tests/mock_tests/bufferorch_ut.cpp new file mode 100644 index 000000000000..845fe77d686a --- /dev/null +++ b/tests/mock_tests/bufferorch_ut.cpp @@ -0,0 +1,409 @@ +#define private public // make Directory::m_values available to clean it. 
+#include "directory.h" +#undef private +#define protected public +#include "orch.h" +#undef protected +#include "ut_helper.h" +#include "mock_orchagent_main.h" +#include "mock_table.h" + +extern string gMySwitchType; + + +namespace bufferorch_test +{ + using namespace std; + + shared_ptr m_app_db; + shared_ptr m_config_db; + shared_ptr m_state_db; + shared_ptr m_chassis_app_db; + + struct BufferOrchTest : public ::testing::Test + { + BufferOrchTest() + { + } + + void CheckDependency(const string &referencingTableName, const string &referencingObjectName, const string &field, const string &dependentTableName, const string &dependentObjectNames="") + { + auto &bufferTypeMaps = BufferOrch::m_buffer_type_maps; + auto &referencingTable = (*bufferTypeMaps[referencingTableName]); + auto &dependentTable = (*bufferTypeMaps[dependentTableName]); + + if (dependentObjectNames.empty()) + { + ASSERT_TRUE(referencingTable[referencingObjectName].m_objsReferencingByMe[field].empty()); + } + else + { + auto objects = tokenize(dependentObjectNames, ','); + string reference; + for (auto &object : objects) + { + reference += dependentTableName + ":" + object + ","; + ASSERT_EQ(dependentTable[object].m_objsDependingOnMe.count(referencingObjectName), 1); + } + //reference.pop(); + ASSERT_EQ(referencingTable[referencingObjectName].m_objsReferencingByMe[field] + ",", reference); + } + } + + void RemoveItem(const string &table, const string &key) + { + std::deque entries; + entries.push_back({key, "DEL", {}}); + auto consumer = dynamic_cast(gBufferOrch->getExecutor(table)); + consumer->addToSync(entries); + } + + void SetUp() override + { + ASSERT_EQ(sai_route_api, nullptr); + map profile = { + { "SAI_VS_SWITCH_TYPE", "SAI_VS_SWITCH_TYPE_BCM56850" }, + { "KV_DEVICE_MAC_ADDRESS", "20:03:04:05:06:00" } + }; + + ut_helper::initSaiApi(profile); + + // Init switch and create dependencies + m_app_db = make_shared("APPL_DB", 0); + m_config_db = make_shared("CONFIG_DB", 0); + m_state_db = make_shared("STATE_DB", 0); + if(gMySwitchType == "voq") + m_chassis_app_db = make_shared("CHASSIS_APP_DB", 0); + + sai_attribute_t attr; + + attr.id = SAI_SWITCH_ATTR_INIT_SWITCH; + attr.value.booldata = true; + + auto status = sai_switch_api->create_switch(&gSwitchId, 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + // Get switch source MAC address + attr.id = SAI_SWITCH_ATTR_SRC_MAC_ADDRESS; + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gMacAddress = attr.value.mac; + + // Get the default virtual router ID + attr.id = SAI_SWITCH_ATTR_DEFAULT_VIRTUAL_ROUTER_ID; + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gVirtualRouterId = attr.value.oid; + + ASSERT_EQ(gCrmOrch, nullptr); + gCrmOrch = new CrmOrch(m_config_db.get(), CFG_CRM_TABLE_NAME); + + TableConnector stateDbSwitchTable(m_state_db.get(), "SWITCH_CAPABILITY"); + TableConnector conf_asic_sensors(m_config_db.get(), CFG_ASIC_SENSORS_TABLE_NAME); + TableConnector app_switch_table(m_app_db.get(), APP_SWITCH_TABLE_NAME); + + vector switch_tables = { + conf_asic_sensors, + app_switch_table + }; + + ASSERT_EQ(gSwitchOrch, nullptr); + gSwitchOrch = new SwitchOrch(m_app_db.get(), switch_tables, stateDbSwitchTable); + + // Create dependencies ... 
+ + const int portsorch_base_pri = 40; + + vector ports_tables = { + { APP_PORT_TABLE_NAME, portsorch_base_pri + 5 }, + { APP_VLAN_TABLE_NAME, portsorch_base_pri + 2 }, + { APP_VLAN_MEMBER_TABLE_NAME, portsorch_base_pri }, + { APP_LAG_TABLE_NAME, portsorch_base_pri + 4 }, + { APP_LAG_MEMBER_TABLE_NAME, portsorch_base_pri } + }; + + vector flex_counter_tables = { + CFG_FLEX_COUNTER_TABLE_NAME + }; + auto* flexCounterOrch = new FlexCounterOrch(m_config_db.get(), flex_counter_tables); + gDirectory.set(flexCounterOrch); + + ASSERT_EQ(gPortsOrch, nullptr); + gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); + + ASSERT_EQ(gVrfOrch, nullptr); + gVrfOrch = new VRFOrch(m_app_db.get(), APP_VRF_TABLE_NAME, m_state_db.get(), STATE_VRF_OBJECT_TABLE_NAME); + + ASSERT_EQ(gIntfsOrch, nullptr); + gIntfsOrch = new IntfsOrch(m_app_db.get(), APP_INTF_TABLE_NAME, gVrfOrch, m_chassis_app_db.get()); + + const int fdborch_pri = 20; + + vector app_fdb_tables = { + { APP_FDB_TABLE_NAME, FdbOrch::fdborch_pri}, + { APP_VXLAN_FDB_TABLE_NAME, FdbOrch::fdborch_pri}, + { APP_MCLAG_FDB_TABLE_NAME, fdborch_pri} + }; + + TableConnector stateDbFdb(m_state_db.get(), STATE_FDB_TABLE_NAME); + TableConnector stateMclagDbFdb(m_state_db.get(), STATE_MCLAG_REMOTE_FDB_TABLE_NAME); + ASSERT_EQ(gFdbOrch, nullptr); + gFdbOrch = new FdbOrch(m_app_db.get(), app_fdb_tables, stateDbFdb, stateMclagDbFdb, gPortsOrch); + + ASSERT_EQ(gNeighOrch, nullptr); + gNeighOrch = new NeighOrch(m_app_db.get(), APP_NEIGH_TABLE_NAME, gIntfsOrch, gFdbOrch, gPortsOrch, m_chassis_app_db.get()); + + vector qos_tables = { + CFG_TC_TO_QUEUE_MAP_TABLE_NAME, + CFG_SCHEDULER_TABLE_NAME, + CFG_DSCP_TO_TC_MAP_TABLE_NAME, + CFG_MPLS_TC_TO_TC_MAP_TABLE_NAME, + CFG_DOT1P_TO_TC_MAP_TABLE_NAME, + CFG_QUEUE_TABLE_NAME, + CFG_PORT_QOS_MAP_TABLE_NAME, + CFG_WRED_PROFILE_TABLE_NAME, + CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, + CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, + CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, + CFG_DSCP_TO_FC_MAP_TABLE_NAME, + CFG_EXP_TO_FC_MAP_TABLE_NAME + }; + gQosOrch = new QosOrch(m_config_db.get(), qos_tables); + + // Recreate buffer orch to read populated data + vector buffer_tables = { APP_BUFFER_POOL_TABLE_NAME, + APP_BUFFER_PROFILE_TABLE_NAME, + APP_BUFFER_QUEUE_TABLE_NAME, + APP_BUFFER_PG_TABLE_NAME, + APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, + APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME }; + + gBufferOrch = new BufferOrch(m_app_db.get(), m_config_db.get(), m_state_db.get(), buffer_tables); + + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate pot table with SAI ports + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + + portTable.set("PortInitDone", { { "lanes", "0" } }); + gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + + Table bufferPoolTable = Table(m_app_db.get(), APP_BUFFER_POOL_TABLE_NAME); + Table bufferProfileTable = Table(m_app_db.get(), APP_BUFFER_PROFILE_TABLE_NAME); + + bufferPoolTable.set("ingress_lossless_pool", + { + {"size", "1024000"}, + {"mode", "dynamic"}, + {"type", "egress"} + }); + bufferPoolTable.set("ingress_lossy_pool", + { + {"size", "1024000"}, + {"mode", "dynamic"}, + {"type", "egress"} + }); + 
bufferProfileTable.set("ingress_lossless_profile", + { + {"pool", "ingress_lossless_pool"}, + {"size", "0"}, + {"dynamic_th", "0"} + }); + bufferProfileTable.set("ingress_lossy_profile", + { + {"pool", "ingress_lossy_pool"}, + {"size", "0"}, + {"dynamic_th", "0"} + }); + + gBufferOrch->addExistingData(&bufferPoolTable); + gBufferOrch->addExistingData(&bufferProfileTable); + + static_cast(gBufferOrch)->doTask(); + } + + void TearDown() override + { + auto buffer_maps = BufferOrch::m_buffer_type_maps; + for (auto &i : buffer_maps) + { + i.second->clear(); + } + + gDirectory.m_values.clear(); + + delete gCrmOrch; + gCrmOrch = nullptr; + + delete gSwitchOrch; + gSwitchOrch = nullptr; + + delete gVrfOrch; + gVrfOrch = nullptr; + + delete gIntfsOrch; + gIntfsOrch = nullptr; + + delete gNeighOrch; + gNeighOrch = nullptr; + + delete gFdbOrch; + gFdbOrch = nullptr; + + delete gPortsOrch; + gPortsOrch = nullptr; + + delete gQosOrch; + gQosOrch = nullptr; + + ut_helper::uninitSaiApi(); + } + }; + + TEST_F(BufferOrchTest, BufferOrchTestBufferPgReferencingObjRemoveThenAdd) + { + vector ts; + std::deque entries; + Table bufferPgTable = Table(m_app_db.get(), APP_BUFFER_PG_TABLE_NAME); + + bufferPgTable.set("Ethernet0:0", + { + {"profile", "ingress_lossy_profile"} + }); + gBufferOrch->addExistingData(&bufferPgTable); + static_cast(gBufferOrch)->doTask(); + CheckDependency(APP_BUFFER_PG_TABLE_NAME, "Ethernet0:0", "profile", APP_BUFFER_PROFILE_TABLE_NAME, "ingress_lossy_profile"); + + // Remove referenced obj + entries.push_back({"ingress_lossy_profile", "DEL", {}}); + auto bufferProfileConsumer = dynamic_cast(gBufferOrch->getExecutor(APP_BUFFER_PROFILE_TABLE_NAME)); + bufferProfileConsumer->addToSync(entries); + entries.clear(); + // Drain BUFFER_PROFILE_TABLE + static_cast(gBufferOrch)->doTask(); + // Make sure the dependency remains + CheckDependency(APP_BUFFER_PG_TABLE_NAME, "Ethernet0:0", "profile", APP_BUFFER_PROFILE_TABLE_NAME, "ingress_lossy_profile"); + // Make sure the notification isn't drained + static_cast(gBufferOrch)->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 1); + ASSERT_EQ(ts[0], "BUFFER_PROFILE_TABLE:ingress_lossy_profile|DEL"); + ts.clear(); + + // Remove and readd referencing obj + entries.push_back({"Ethernet0:0", "DEL", {}}); + entries.push_back({"Ethernet0:0", "SET", + { + {"profile", "ingress_lossy_profile"} + }}); + auto bufferPgConsumer = dynamic_cast(gBufferOrch->getExecutor(APP_BUFFER_PG_TABLE_NAME)); + bufferPgConsumer->addToSync(entries); + entries.clear(); + // Drain the BUFFER_PG_TABLE + static_cast(gBufferOrch)->doTask(); + // Drain the BUFFER_PROFILE_TABLE which contains items need to retry + static_cast(gBufferOrch)->doTask(); + // The dependency should be removed + CheckDependency(APP_BUFFER_PG_TABLE_NAME, "Ethernet0:0", "profile", APP_BUFFER_PROFILE_TABLE_NAME); + static_cast(gBufferOrch)->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 1); + ASSERT_EQ(ts[0], "BUFFER_PG_TABLE:Ethernet0:0|SET|profile:ingress_lossy_profile"); + ts.clear(); + + // Re-create referenced obj + entries.push_back({"ingress_lossy_profile", "SET", + { + {"pool", "ingress_lossy_pool"}, + {"size", "0"}, + {"dynamic_th", "0"} + }}); + bufferProfileConsumer->addToSync(entries); + entries.clear(); + // Drain BUFFER_PROFILE_TABLE table + static_cast(gBufferOrch)->doTask(); + // Make sure the dependency recovers + CheckDependency(APP_BUFFER_PG_TABLE_NAME, "Ethernet0:0", "profile", APP_BUFFER_PROFILE_TABLE_NAME, "ingress_lossy_profile"); + + // All items have been drained + 
static_cast(gBufferOrch)->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + } + + TEST_F(BufferOrchTest, BufferOrchTestReferencingObjRemoveThenAdd) + { + vector ts; + std::deque entries; + Table bufferProfileListTable = Table(m_app_db.get(), APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME); + bufferProfileListTable.set("Ethernet0", + { + {"profile_list", "ingress_lossy_profile,ingress_lossless_profile"} + }); + gBufferOrch->addExistingData(&bufferProfileListTable); + static_cast(gBufferOrch)->doTask(); + CheckDependency(APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, "Ethernet0", "profile_list", + APP_BUFFER_PROFILE_TABLE_NAME, "ingress_lossy_profile,ingress_lossless_profile"); + + // Remove and recreate the referenced profile + entries.push_back({"ingress_lossy_profile", "DEL", {}}); + entries.push_back({"ingress_lossy_profile", "SET", + { + {"pool", "ingress_lossy_pool"}, + {"size", "0"}, + {"dynamic_th", "0"} + }}); + auto consumer = dynamic_cast(gBufferOrch->getExecutor(APP_BUFFER_PROFILE_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + // Drain BUFFER_PROFILE_TABLE table + static_cast(gBufferOrch)->doTask(); + // Make sure the dependency recovers + CheckDependency(APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, "Ethernet0", "profile_list", + APP_BUFFER_PROFILE_TABLE_NAME, "ingress_lossy_profile,ingress_lossless_profile"); + // Make sure the notification isn't drained + static_cast(gBufferOrch)->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 2); + ASSERT_EQ(ts[0], "BUFFER_PROFILE_TABLE:ingress_lossy_profile|DEL"); + ASSERT_EQ(ts[1], "BUFFER_PROFILE_TABLE:ingress_lossy_profile|SET|pool:ingress_lossy_pool|size:0|dynamic_th:0"); + ts.clear(); + + // Remove and recreate the referenced pool + entries.push_back({"ingress_lossy_pool", "DEL", {}}); + entries.push_back({"ingress_lossy_pool", "SET", + { + {"type", "ingress"}, + {"size", "1024000"}, + {"mode", "dynamic"} + }}); + consumer = dynamic_cast(gBufferOrch->getExecutor(APP_BUFFER_POOL_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + // Drain BUFFER_POOL_TABLE table + static_cast(gBufferOrch)->doTask(); + // Make sure the dependency recovers + CheckDependency(APP_BUFFER_PROFILE_TABLE_NAME, "ingress_lossy_profile", "pool", + APP_BUFFER_POOL_TABLE_NAME, "ingress_lossy_pool"); + // Make sure the notification isn't drained + static_cast(gBufferOrch)->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 4); + ASSERT_EQ(ts[0], "BUFFER_POOL_TABLE:ingress_lossy_pool|DEL"); + ASSERT_EQ(ts[1], "BUFFER_POOL_TABLE:ingress_lossy_pool|SET|type:ingress|size:1024000|mode:dynamic"); + ASSERT_EQ(ts[2], "BUFFER_PROFILE_TABLE:ingress_lossy_profile|DEL"); + ASSERT_EQ(ts[3], "BUFFER_PROFILE_TABLE:ingress_lossy_profile|SET|pool:ingress_lossy_pool|size:0|dynamic_th:0"); + ts.clear(); + } +} diff --git a/tests/mock_tests/qosorch_ut.cpp b/tests/mock_tests/qosorch_ut.cpp index 0644058a469b..a2152f8d7c39 100644 --- a/tests/mock_tests/qosorch_ut.cpp +++ b/tests/mock_tests/qosorch_ut.cpp @@ -759,6 +759,176 @@ namespace qosorch_test ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME])["AZURE_1"].m_saiObjectId, SAI_NULL_OBJECT_ID); } + TEST_F(QosOrchTest, QosOrchTestPortQosMapReferencingObjRemoveThenAdd) + { + vector ts; + std::deque entries; + Table portQosMapTable = Table(m_config_db.get(), CFG_PORT_QOS_MAP_TABLE_NAME); + + portQosMapTable.set("Ethernet0", + { + {"dscp_to_tc_map", "AZURE"} + }); + gQosOrch->addExistingData(&portQosMapTable); + static_cast(gQosOrch)->doTask(); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", 
"dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE"); + + // Remove referenced obj + entries.push_back({"AZURE", "DEL", {}}); + auto dscpToTcMapConsumer = dynamic_cast(gQosOrch->getExecutor(CFG_DSCP_TO_TC_MAP_TABLE_NAME)); + dscpToTcMapConsumer->addToSync(entries); + entries.clear(); + // Drain DSCP_TO_TC_MAP table + static_cast(gQosOrch)->doTask(); + // Make sure the dependency remains + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE"); + // Make sure the notification isn't drained + static_cast(gQosOrch)->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 1); + ASSERT_EQ(ts[0], "DSCP_TO_TC_MAP|AZURE|DEL"); + ts.clear(); + + // Remove and readd referencing obj + entries.push_back({"Ethernet0", "DEL", {}}); + entries.push_back({"Ethernet0", "SET", + { + {"dscp_to_tc_map", "AZURE"} + }}); + auto portQosMapConsumer = dynamic_cast(gQosOrch->getExecutor(CFG_PORT_QOS_MAP_TABLE_NAME)); + portQosMapConsumer->addToSync(entries); + entries.clear(); + // Drain the PORT_QOS_MAP table + static_cast(gQosOrch)->doTask(); + // Drain the DSCP_TO_TC_MAP table which contains items need to retry + static_cast(gQosOrch)->doTask(); + // The dependency should be removed + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME); + static_cast(gQosOrch)->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 1); + ASSERT_EQ(ts[0], "PORT_QOS_MAP|Ethernet0|SET|dscp_to_tc_map:AZURE"); + ts.clear(); + + // Re-create referenced obj + entries.push_back({"AZURE", "SET", + { + {"1", "0"} + }}); + dscpToTcMapConsumer->addToSync(entries); + entries.clear(); + // Drain DSCP_TO_TC_MAP table + static_cast(gQosOrch)->doTask(); + // Make sure the dependency recovers + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE"); + + // All items have been drained + static_cast(gQosOrch)->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + // Remove and recreate the referenced obj + entries.push_back({"AZURE", "DEL", {}}); + entries.push_back({"AZURE", "SET", + { + {"1", "0"} + }}); + dscpToTcMapConsumer->addToSync(entries); + entries.clear(); + // Drain DSCP_TO_TC_MAP table + static_cast(gQosOrch)->doTask(); + // Make sure the dependency remains + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE"); + // Make sure the notification isn't drained + static_cast(gQosOrch)->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 2); + ASSERT_EQ(ts[0], "DSCP_TO_TC_MAP|AZURE|DEL"); + ASSERT_EQ(ts[1], "DSCP_TO_TC_MAP|AZURE|SET|1:0"); + ts.clear(); + } + + TEST_F(QosOrchTest, QosOrchTestQueueReferencingObjRemoveThenAdd) + { + vector ts; + std::deque entries; + Table queueTable = Table(m_config_db.get(), CFG_QUEUE_TABLE_NAME); + + queueTable.set("Ethernet0|0", + { + {"scheduler", "scheduler.0"} + }); + gQosOrch->addExistingData(&queueTable); + static_cast(gQosOrch)->doTask(); + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|0", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.0"); + + // Remove referenced obj + entries.push_back({"scheduler.0", "DEL", {}}); + auto schedulerConsumer = dynamic_cast(gQosOrch->getExecutor(CFG_SCHEDULER_TABLE_NAME)); + schedulerConsumer->addToSync(entries); + entries.clear(); + // Drain SCHEDULER table + static_cast(gQosOrch)->doTask(); + // Make sure the dependency remains + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|0", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.0"); + 
static_cast(gQosOrch)->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 1); + ASSERT_EQ(ts[0], "SCHEDULER|scheduler.0|DEL"); + ts.clear(); + + // Remove and readd referencing obj + entries.push_back({"Ethernet0|0", "DEL", {}}); + entries.push_back({"Ethernet0|0", "SET", + { + {"scheduler", "scheduler.0"} + }}); + auto queueConsumer = dynamic_cast(gQosOrch->getExecutor(CFG_QUEUE_TABLE_NAME)); + queueConsumer->addToSync(entries); + entries.clear(); + // Drain QUEUE table + static_cast(gQosOrch)->doTask(); + // The dependency should be removed + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|0", "scheduler", CFG_SCHEDULER_TABLE_NAME); + static_cast(gQosOrch)->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 1); + ASSERT_EQ(ts[0], "QUEUE|Ethernet0|0|SET|scheduler:scheduler.0"); + ts.clear(); + + // Re-create referenced obj + entries.push_back({"scheduler.0", "SET", + { + {"type", "DWRR"}, + {"weight", "14"} + }}); + schedulerConsumer->addToSync(entries); + entries.clear(); + // Drain SCHEDULER table + static_cast(gQosOrch)->doTask(); + // Drain QUEUE table + static_cast(gQosOrch)->doTask(); + // Make sure the dependency recovers + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|0", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.0"); + + // All items have been drained + static_cast(gQosOrch)->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + // Remove and then re-add the referenced obj + entries.push_back({"scheduler.0", "DEL", {}}); + entries.push_back({"scheduler.0", "SET", + { + {"type", "DWRR"}, + {"weight", "14"} + }}); + schedulerConsumer->addToSync(entries); + entries.clear(); + // Drain SCHEDULER table + static_cast(gQosOrch)->doTask(); + // Make sure the dependency remains + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|0", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.0"); + static_cast(gQosOrch)->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 2); + ASSERT_EQ(ts[0], "SCHEDULER|scheduler.0|DEL"); + ASSERT_EQ(ts[1], "SCHEDULER|scheduler.0|SET|type:DWRR|weight:14"); + ts.clear(); + } + TEST_F(QosOrchTest, QosOrchTestGlobalDscpToTcMap) { // Make sure dscp to tc map is correct
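Appendix (illustrative, not part of the patch): a minimal, self-contained sketch of the pending-remove retry pattern described in the commit message. The names used here (Object, g_table, handleSet, handleDel, Status) are simplified stand-ins, not the actual orchagent types; the real implementation keeps the flag as m_pendingRemove inside referenced_object (orch.h) and returns task_process_status::task_need_retry from the table handlers shown in the diff.

#include <iostream>
#include <map>
#include <string>

// Illustrative stand-ins for orchagent's referenced_object and task_process_status.
enum class Status { Success, NeedRetry };

struct Object
{
    int  refCount = 0;          // number of objects referencing this one
    bool pendingRemove = false; // set when a DEL arrives while refCount > 0
};

static std::map<std::string, Object> g_table;

// DEL handler: if the object is still referenced, mark it pending-remove and
// keep the notification around (NeedRetry); otherwise remove it for real.
static Status handleDel(const std::string &name)
{
    auto it = g_table.find(name);
    if (it == g_table.end())
        return Status::Success;              // already gone, nothing to do
    if (it->second.refCount > 0)
    {
        it->second.pendingRemove = true;     // step 1 of the scenario
        return Status::NeedRetry;
    }
    g_table.erase(it);
    return Status::Success;
}

// SET handler: refuse to (re)create an object that is pending removal so the
// outstanding DEL drains first.
static Status handleSet(const std::string &name)
{
    auto &obj = g_table[name];
    if (obj.pendingRemove)
        return Status::NeedRetry;            // retried on a later doTask()
    obj.pendingRemove = false;               // mirrors the patch: clear the flag on successful SET
    return Status::Success;
}

static const char *str(Status s)
{
    return s == Status::NeedRetry ? "need_retry" : "success";
}

int main()
{
    g_table["b"].refCount = 1;                              // b is referenced by a
    std::cout << "DEL b: " << str(handleDel("b")) << "\n";  // need_retry, flag set
    g_table["b"].refCount = 0;                              // a's DEL finally clears the reference
    std::cout << "SET b: " << str(handleSet("b")) << "\n";  // still need_retry: the stale DEL must drain first
    std::cout << "DEL b: " << str(handleDel("b")) << "\n";  // success, b removed
    std::cout << "SET b: " << str(handleSet("b")) << "\n";  // success, b re-created cleanly
    return 0;
}

The key property is the ordering shown in main(): while the pending-remove flag is set, the re-add is deferred, which guarantees the stale DEL is consumed before the object is recreated. This is the same behaviour the mock tests above assert through dumpPendingTasks().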