From 61a7f4e0a1bf95461e6360701cb59b5fe16abce3 Mon Sep 17 00:00:00 2001
From: Petro Bratash <68950226+bratashX@users.noreply.github.com>
Date: Tue, 23 Mar 2021 19:02:16 +0200
Subject: [PATCH] Handle the clear request for 'Q_SHARED_ALL' (#1653)

What I did

Add handling of the clear request for 'Q_SHARED_ALL'.

Why I did it

Azure/sonic-utilities#1149 added the following new commands:

    sonic-clear queue persistent-watermark all
    sonic-clear queue watermark all

Without these changes, those commands fall through to the error handling at
https://github.com/Azure/sonic-swss/blob/master/orchagent/watermarkorch.cpp#L221

Signed-off-by: Petro Bratash <68950226+bratashX@users.noreply.github.com>
---
 orchagent/watermarkorch.cpp | 22 ++++++++++++++++++----
 orchagent/watermarkorch.h   |  1 +
 tests/test_watermark.py     | 32 ++++++++++++++++++++++++++++++--
 3 files changed, 49 insertions(+), 6 deletions(-)

diff --git a/orchagent/watermarkorch.cpp b/orchagent/watermarkorch.cpp
index 8b8246ce35..4d0aa80bf6 100644
--- a/orchagent/watermarkorch.cpp
+++ b/orchagent/watermarkorch.cpp
@@ -12,6 +12,7 @@
 #define CLEAR_PG_SHARED_REQUEST "PG_SHARED"
 #define CLEAR_QUEUE_SHARED_UNI_REQUEST "Q_SHARED_UNI"
 #define CLEAR_QUEUE_SHARED_MULTI_REQUEST "Q_SHARED_MULTI"
+#define CLEAR_QUEUE_SHARED_ALL_REQUEST "Q_SHARED_ALL"
 #define CLEAR_BUFFER_POOL_REQUEST "BUFFER_POOL"
 #define CLEAR_HEADROOM_POOL_REQUEST "HEADROOM_POOL"
 
@@ -92,7 +93,7 @@ void WatermarkOrch::doTask(Consumer &consumer)
 
 void WatermarkOrch::handleWmConfigUpdate(const std::string &key, const std::vector<FieldValueTuple> &fvt)
 {
-    SWSS_LOG_ENTER();
+    SWSS_LOG_ENTER();
     if (key == "TELEMETRY_INTERVAL")
     {
         for (std::pair<std::basic_string<char>, std::basic_string<char> > i: fvt)
@@ -153,7 +154,7 @@ void WatermarkOrch::doTask(NotificationConsumer &consumer)
         init_pg_ids();
     }
 
-    if (m_multicast_queue_ids.empty() and m_unicast_queue_ids.empty())
+    if (m_multicast_queue_ids.empty() and m_unicast_queue_ids.empty() and m_all_queue_ids.empty())
     {
         init_queue_ids();
     }
@@ -204,6 +205,12 @@ void WatermarkOrch::doTask(NotificationConsumer &consumer)
             "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES",
             m_multicast_queue_ids);
     }
+    else if (data == CLEAR_QUEUE_SHARED_ALL_REQUEST)
+    {
+        clearSingleWm(table,
+            "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES",
+            m_all_queue_ids);
+    }
     else if (data == CLEAR_BUFFER_POOL_REQUEST)
     {
         clearSingleWm(table,
@@ -232,7 +239,7 @@ void WatermarkOrch::doTask(SelectableTimer &timer)
         init_pg_ids();
     }
 
-    if (m_multicast_queue_ids.empty() and m_unicast_queue_ids.empty())
+    if (m_multicast_queue_ids.empty() and m_unicast_queue_ids.empty() and m_all_queue_ids.empty())
     {
         init_queue_ids();
     }
@@ -261,6 +268,9 @@ void WatermarkOrch::doTask(SelectableTimer &timer)
         clearSingleWm(m_periodicWatermarkTable.get(),
             "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES",
             m_multicast_queue_ids);
+        clearSingleWm(m_periodicWatermarkTable.get(),
+            "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES",
+            m_all_queue_ids);
         clearSingleWm(m_periodicWatermarkTable.get(),
             "SAI_BUFFER_POOL_STAT_WATERMARK_BYTES",
             gBufferOrch->getBufferPoolNameOidMap());
@@ -299,10 +309,14 @@ void WatermarkOrch::init_queue_ids()
         {
             m_unicast_queue_ids.push_back(id);
         }
-        else
+        else if (fv.second == "SAI_QUEUE_TYPE_MULTICAST")
         {
             m_multicast_queue_ids.push_back(id);
         }
+        else if (fv.second == "SAI_QUEUE_TYPE_ALL")
+        {
+            m_all_queue_ids.push_back(id);
+        }
     }
 }
 
diff --git a/orchagent/watermarkorch.h b/orchagent/watermarkorch.h
index dfbc6574f5..43d0939001 100644
--- a/orchagent/watermarkorch.h
+++ b/orchagent/watermarkorch.h
@@ -68,6 +68,7 @@ class WatermarkOrch : public Orch
 
     std::vector<sai_object_id_t> m_unicast_queue_ids;
     std::vector<sai_object_id_t> m_multicast_queue_ids;
+    std::vector<sai_object_id_t> m_all_queue_ids;
     std::vector<sai_object_id_t> m_pg_ids;
 };
 
diff --git a/tests/test_watermark.py b/tests/test_watermark.py
index 33e4154c89..6d7c993125 100644
--- a/tests/test_watermark.py
+++ b/tests/test_watermark.py
@@ -159,14 +159,18 @@ def set_up(self, dvs):
 
         self.uc_q = []
         self.mc_q = []
+        self.all_q = []
 
         for q in self.qs:
-            if self.qs.index(q) % 16 < 8:
+            if self.qs.index(q) % 16 < 5:
                 tbl.set('', [(q, "SAI_QUEUE_TYPE_UNICAST")])
                 self.uc_q.append(q)
-            else:
+            elif self.qs.index(q) % 16 < 10:
                 tbl.set('', [(q, "SAI_QUEUE_TYPE_MULTICAST")])
                 self.mc_q.append(q)
+            else:
+                tbl.set('', [(q, "SAI_QUEUE_TYPE_ALL")])
+                self.all_q.append(q)
 
     def test_telemetry_period(self, dvs):
         self.setup_dbs(dvs)
@@ -279,6 +283,30 @@ def test_clear(self, dvs):
         self.verify_value(dvs, self.mc_q, WmTables.persistent, SaiWmStats.queue_shared, "288")
         self.verify_value(dvs, self.uc_q, WmTables.user, SaiWmStats.queue_shared, "288")
         self.verify_value(dvs, self.mc_q, WmTables.user, SaiWmStats.queue_shared, "288")
+        self.verify_value(dvs, self.all_q, WmTables.user, SaiWmStats.queue_shared, "288")
+        self.verify_value(dvs, self.all_q, WmTables.persistent, SaiWmStats.queue_shared, "288")
+
+        # clear queue all watermark, and verify that multicast and unicast watermarks are not affected
+
+        # clear persistent all watermark
+        exitcode, output = dvs.runcmd("sonic-clear queue persistent-watermark all")
+        time.sleep(1)
+        assert exitcode == 0, "CLI failure: %s" % output
+        # make sure it cleared
+        self.verify_value(dvs, self.all_q, WmTables.persistent, SaiWmStats.queue_shared, "0")
+
+        # clear user all watermark
+        exitcode, output = dvs.runcmd("sonic-clear queue watermark all")
+        time.sleep(1)
+        assert exitcode == 0, "CLI failure: %s" % output
+        # make sure it cleared
+        self.verify_value(dvs, self.all_q, WmTables.user, SaiWmStats.queue_shared, "0")
+
+        # make sure the rest is untouched
+        self.verify_value(dvs, self.mc_q, WmTables.user, SaiWmStats.queue_shared, "288")
+        self.verify_value(dvs, self.mc_q, WmTables.persistent, SaiWmStats.queue_shared, "288")
+        self.verify_value(dvs, self.uc_q, WmTables.user, SaiWmStats.queue_shared, "288")
+        self.verify_value(dvs, self.uc_q, WmTables.persistent, SaiWmStats.queue_shared, "0")
 
         self.enable_unittests(dvs, "false")
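
Note on the request flow: WatermarkOrch::doTask(NotificationConsumer&) pops a notification whose 'op' selects the USER or PERSISTENT watermark table and whose 'data' carries the request name (e.g. the new Q_SHARED_ALL). The dvs test above exercises this end to end through "sonic-clear queue watermark all". For illustration only, a minimal sketch of publishing such a request directly from Python via swsscommon follows; the COUNTERS_DB location and the WATERMARK_CLEAR_REQUEST channel name are assumptions based on the orchagent wiring and should be verified against the sonic-swss sources.

    # Hedged sketch: send a watermark clear request in the shape the
    # NotificationConsumer in watermarkorch.cpp expects (op, data, fvs).
    from swsscommon import swsscommon

    db = swsscommon.DBConnector("COUNTERS_DB", 0)                         # assumed DB
    ntf = swsscommon.NotificationProducer(db, "WATERMARK_CLEAR_REQUEST")  # assumed channel
    fvs = swsscommon.FieldValuePairs([])                                  # no extra fields needed

    # 'op' picks the table ("USER" or "PERSISTENT"); 'data' is the request type.
    ntf.send("PERSISTENT", "Q_SHARED_ALL", fvs)

In practice the CLI path ("sonic-clear queue watermark all", added in Azure/sonic-utilities#1149) is the supported way to trigger this; the sketch only shows the notification contract the new Q_SHARED_ALL branch handles.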