diff --git a/orchagent/watermarkorch.cpp b/orchagent/watermarkorch.cpp index 8b8246ce35e..4d0aa80bf6a 100644 --- a/orchagent/watermarkorch.cpp +++ b/orchagent/watermarkorch.cpp @@ -12,6 +12,7 @@ #define CLEAR_PG_SHARED_REQUEST "PG_SHARED" #define CLEAR_QUEUE_SHARED_UNI_REQUEST "Q_SHARED_UNI" #define CLEAR_QUEUE_SHARED_MULTI_REQUEST "Q_SHARED_MULTI" +#define CLEAR_QUEUE_SHARED_ALL_REQUEST "Q_SHARED_ALL" #define CLEAR_BUFFER_POOL_REQUEST "BUFFER_POOL" #define CLEAR_HEADROOM_POOL_REQUEST "HEADROOM_POOL" @@ -92,7 +93,7 @@ void WatermarkOrch::doTask(Consumer &consumer) void WatermarkOrch::handleWmConfigUpdate(const std::string &key, const std::vector<FieldValueTuple> &fvt) { - SWSS_LOG_ENTER(); + SWSS_LOG_ENTER(); if (key == "TELEMETRY_INTERVAL") { for (std::pair<std::basic_string<char>, std::basic_string<char> > i: fvt) @@ -153,7 +154,7 @@ void WatermarkOrch::doTask(NotificationConsumer &consumer) init_pg_ids(); } - if (m_multicast_queue_ids.empty() and m_unicast_queue_ids.empty()) + if (m_multicast_queue_ids.empty() and m_unicast_queue_ids.empty() and m_all_queue_ids.empty()) { init_queue_ids(); } @@ -204,6 +205,12 @@ void WatermarkOrch::doTask(NotificationConsumer &consumer) "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES", m_multicast_queue_ids); } + else if (data == CLEAR_QUEUE_SHARED_ALL_REQUEST) + { + clearSingleWm(table, + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES", + m_all_queue_ids); + } else if (data == CLEAR_BUFFER_POOL_REQUEST) { clearSingleWm(table, @@ -232,7 +239,7 @@ void WatermarkOrch::doTask(SelectableTimer &timer) init_pg_ids(); } - if (m_multicast_queue_ids.empty() and m_unicast_queue_ids.empty()) + if (m_multicast_queue_ids.empty() and m_unicast_queue_ids.empty() and m_all_queue_ids.empty()) { init_queue_ids(); } @@ -261,6 +268,9 @@ void WatermarkOrch::doTask(SelectableTimer &timer) clearSingleWm(m_periodicWatermarkTable.get(), "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES", m_multicast_queue_ids); + clearSingleWm(m_periodicWatermarkTable.get(), + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES", + m_all_queue_ids); 
clearSingleWm(m_periodicWatermarkTable.get(), "SAI_BUFFER_POOL_STAT_WATERMARK_BYTES", gBufferOrch->getBufferPoolNameOidMap()); @@ -299,10 +309,14 @@ void WatermarkOrch::init_queue_ids() { m_unicast_queue_ids.push_back(id); } - else + else if (fv.second == "SAI_QUEUE_TYPE_MULTICAST") { m_multicast_queue_ids.push_back(id); } + else if (fv.second == "SAI_QUEUE_TYPE_ALL") + { + m_all_queue_ids.push_back(id); + } } } diff --git a/orchagent/watermarkorch.h b/orchagent/watermarkorch.h index dfbc6574f55..43d0939001c 100644 --- a/orchagent/watermarkorch.h +++ b/orchagent/watermarkorch.h @@ -68,6 +68,7 @@ class WatermarkOrch : public Orch std::vector<sai_object_id_t> m_unicast_queue_ids; std::vector<sai_object_id_t> m_multicast_queue_ids; + std::vector<sai_object_id_t> m_all_queue_ids; std::vector<sai_object_id_t> m_pg_ids; }; diff --git a/tests/test_watermark.py b/tests/test_watermark.py index 33e4154c895..6d7c993125d 100644 --- a/tests/test_watermark.py +++ b/tests/test_watermark.py @@ -159,14 +159,18 @@ def set_up(self, dvs): self.uc_q = [] self.mc_q = [] + self.all_q = [] for q in self.qs: - if self.qs.index(q) % 16 < 8: + if self.qs.index(q) % 16 < 5: tbl.set('', [(q, "SAI_QUEUE_TYPE_UNICAST")]) self.uc_q.append(q) - else: + elif self.qs.index(q) % 16 < 10: tbl.set('', [(q, "SAI_QUEUE_TYPE_MULTICAST")]) self.mc_q.append(q) + else: + tbl.set('', [(q, "SAI_QUEUE_TYPE_ALL")]) + self.all_q.append(q) def test_telemetry_period(self, dvs): self.setup_dbs(dvs) @@ -279,6 +283,30 @@ def test_clear(self, dvs): self.verify_value(dvs, self.mc_q, WmTables.persistent, SaiWmStats.queue_shared, "288") self.verify_value(dvs, self.uc_q, WmTables.user, SaiWmStats.queue_shared, "288") self.verify_value(dvs, self.mc_q, WmTables.user, SaiWmStats.queue_shared, "288") + self.verify_value(dvs, self.all_q, WmTables.user, SaiWmStats.queue_shared, "288") + self.verify_value(dvs, self.all_q, WmTables.persistent, SaiWmStats.queue_shared, "288") + + # clear queue all watermark, and verify that multicast and unicast watermarks are not affected + + # clear persistent 
all watermark + exitcode, output = dvs.runcmd("sonic-clear queue persistent-watermark all") + time.sleep(1) + assert exitcode == 0, "CLI failure: %s" % output + # make sure it cleared + self.verify_value(dvs, self.all_q, WmTables.persistent, SaiWmStats.queue_shared, "0") + + # clear user all watermark + exitcode, output = dvs.runcmd("sonic-clear queue watermark all") + time.sleep(1) + assert exitcode == 0, "CLI failure: %s" % output + # make sure it cleared + self.verify_value(dvs, self.all_q, WmTables.user, SaiWmStats.queue_shared, "0") + + # make sure the rest is untouched + self.verify_value(dvs, self.mc_q, WmTables.user, SaiWmStats.queue_shared, "288") + self.verify_value(dvs, self.mc_q, WmTables.persistent, SaiWmStats.queue_shared, "288") + self.verify_value(dvs, self.uc_q, WmTables.user, SaiWmStats.queue_shared, "288") + self.verify_value(dvs, self.uc_q, WmTables.persistent, SaiWmStats.queue_shared, "0") self.enable_unittests(dvs, "false")