Fix review comments
Signed-off-by: Stephen Sun <stephens@nvidia.com>
stephenxs committed Nov 17, 2021
1 parent 8df51bb commit 84ed8cc
Showing 4 changed files with 30 additions and 36 deletions.
2 changes: 1 addition & 1 deletion orchagent/orch.cpp
@@ -647,7 +647,7 @@ bool Orch::parseIndexRange(const string &input, sai_uint32_t &range_low, sai_uin
  *
  * Example:
  * Input idsMap: 3-4
- * Return: 00001100b
+ * Return: 00011000b
  */
 unsigned long Orch::generateBitMapFromIdsStr(const string &idsStr)
 {
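
The corrected comment matches what the function name promises: for the input "3-4", bits 3 and 4 are set, i.e. (1 << 3) | (1 << 4) = 24 = 00011000b, whereas the old value 00001100b corresponds to bits 2 and 3. A quick standalone check of that arithmetic (a Python sketch, not the orchagent implementation):

# Worked check for the corrected comment: ids "3-4" -> bits 3 and 4 set.
def bitmap_from_range(ids_str):
    low, high = (int(x) for x in ids_str.split('-'))
    bitmap = 0
    for i in range(low, high + 1):
        bitmap |= 1 << i
    return bitmap

assert bitmap_from_range("3-4") == 0b00011000  # 24, not 0b00001100 (12)
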
16 changes: 5 additions & 11 deletions orchagent/portsorch.cpp
@@ -4071,29 +4071,24 @@ void PortsOrch::initializePriorityGroups(Port &port)
     SWSS_LOG_INFO("Get priority groups for port %s", port.m_alias.c_str());
 }
 
-void PortsOrch::initializePortMaximumHeadroom(Port &port)
+void PortsOrch::initializePortBufferMaximumParameters(Port &port)
 {
     sai_attribute_t attr;
+    vector<FieldValueTuple> fvVector;
 
     attr.id = SAI_PORT_ATTR_QOS_MAXIMUM_HEADROOM_SIZE;
 
     sai_status_t status = sai_port_api->get_port_attribute(port.m_port_id, 1, &attr);
     if (status != SAI_STATUS_SUCCESS)
     {
         SWSS_LOG_NOTICE("Unable to get the maximum headroom for port %s rv:%d, ignored", port.m_alias.c_str(), status);
-        return;
     }
-
-    port.m_maximum_headroom = attr.value.u32;
-}
-
-void PortsOrch::initializePortBufferMaximumParameters(Port &port)
-{
-    vector<FieldValueTuple> fvVector;
-    if (port.m_maximum_headroom > 0)
+    else
     {
+        port.m_maximum_headroom = attr.value.u32;
         fvVector.emplace_back("max_headroom_size", to_string(port.m_maximum_headroom));
     }
 
     fvVector.emplace_back("max_priority_groups", to_string(port.m_priority_group_ids.size()));
     fvVector.emplace_back("max_queues", to_string(port.m_queue_ids.size()));

@@ -4108,7 +4103,6 @@ bool PortsOrch::initializePort(Port &port)

     initializePriorityGroups(port);
     initializeQueues(port);
-    initializePortMaximumHeadroom(port);
     initializePortBufferMaximumParameters(port);
 
     /* Create host interface */
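
Taken together, the two hunks above fold initializePortMaximumHeadroom() into initializePortBufferMaximumParameters(): the query for SAI_PORT_ATTR_QOS_MAXIMUM_HEADROOM_SIZE no longer returns early on failure; instead, max_headroom_size is published only when the attribute is available, while max_priority_groups and max_queues are always published. A rough sketch of that control flow (plain Python with a stubbed query; the names below are stand-ins, not the orchagent or SAI APIs):

# Sketch of the merged flow; query_max_headroom() stands in for the
# sai_port_api->get_port_attribute() call and returns None on failure.
def collect_port_buffer_maximums(port, query_max_headroom):
    fields = {}

    headroom = query_max_headroom(port)
    if headroom is None:
        print("Unable to get the maximum headroom for port %s, ignored" % port["alias"])
    else:
        port["maximum_headroom"] = headroom
        fields["max_headroom_size"] = str(headroom)

    # These maxima are published regardless of whether the headroom query succeeded.
    fields["max_priority_groups"] = str(len(port["priority_group_ids"]))
    fields["max_queues"] = str(len(port["queue_ids"]))
    return fields
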
1 change: 0 additions & 1 deletion orchagent/portsorch.h
@@ -253,7 +253,6 @@ class PortsOrch : public Orch, public Subject

     bool initializePort(Port &port);
     void initializePriorityGroups(Port &port);
-    void initializePortMaximumHeadroom(Port &port);
     void initializePortBufferMaximumParameters(Port &port);
     void initializeQueues(Port &port);

47 changes: 24 additions & 23 deletions tests/test_buffer_dynamic.py
@@ -643,29 +643,30 @@ def test_removeBufferPool(self, dvs, testlog):
         self.counter_db = dvs.get_counters_db()
         self.flex_db = dvs.get_flex_db()
 
-        # Create a new pool
-        self.config_db.update_entry('BUFFER_POOL', 'ingress_test_pool', {'size': '0', 'mode': 'static', 'type': 'ingress'})
-
-        # Whether counterpoll is enabled? Enable it if not.
-        flex_counter = self.config_db.get_entry("FLEX_COUNTER_TABLE", "BUFFER_POOL_WATERMARK")
-        counter_poll_disabled = (not flex_counter or flex_counter["FLEX_COUNTER_STATUS"] != 'enable')
-        if counter_poll_disabled:
-            self.config_db.update_entry("FLEX_COUNTER_TABLE", "BUFFER_POOL_WATERMARK", {"FLEX_COUNTER_STATUS": "enable"})
-
-        # Check whether counter poll has been enabled
-        time.sleep(1)
-        poolmap = self.counter_db.wait_for_entry("COUNTERS_BUFFER_POOL_NAME_MAP", "")
-        assert poolmap["ingress_test_pool"]
-        self.flex_db.wait_for_entry("FLEX_COUNTER_TABLE", "BUFFER_POOL_WATERMARK_STAT_COUNTER:{}".format(poolmap["ingress_test_pool"]))
-
-        self.config_db.delete_entry('BUFFER_POOL', 'ingress_test_pool')
-        oid_to_remove = poolmap.pop('ingress_test_pool')
-        self.counter_db.wait_for_field_match("COUNTERS_BUFFER_POOL_NAME_MAP", "", poolmap)
-        self.flex_db.wait_for_deleted_entry("FLEX_COUNTER_TABLE", "BUFFER_POOL_WATERMARK_STAT_COUNTER:{}".format(oid_to_remove))
-
-        # Clean up: disable counterpoll if it was disabled
-        if counter_poll_disabled:
-            self.config_db.delete_entry("FLEX_COUNTER_TABLE", "BUFFER_POOL_WATERMARK")
+        try:
+            # Create a new pool
+            self.config_db.update_entry('BUFFER_POOL', 'ingress_test_pool', {'size': '0', 'mode': 'static', 'type': 'ingress'})
+
+            # Whether counterpoll is enabled? Enable it if not.
+            flex_counter = self.config_db.get_entry("FLEX_COUNTER_TABLE", "BUFFER_POOL_WATERMARK")
+            counter_poll_disabled = (not flex_counter or flex_counter["FLEX_COUNTER_STATUS"] != 'enable')
+            if counter_poll_disabled:
+                self.config_db.update_entry("FLEX_COUNTER_TABLE", "BUFFER_POOL_WATERMARK", {"FLEX_COUNTER_STATUS": "enable"})
+
+            # Check whether counter poll has been enabled
+            time.sleep(1)
+            poolmap = self.counter_db.wait_for_entry("COUNTERS_BUFFER_POOL_NAME_MAP", "")
+            assert poolmap["ingress_test_pool"]
+            self.flex_db.wait_for_entry("FLEX_COUNTER_TABLE", "BUFFER_POOL_WATERMARK_STAT_COUNTER:{}".format(poolmap["ingress_test_pool"]))
+
+            self.config_db.delete_entry('BUFFER_POOL', 'ingress_test_pool')
+            oid_to_remove = poolmap.pop('ingress_test_pool')
+            self.counter_db.wait_for_field_match("COUNTERS_BUFFER_POOL_NAME_MAP", "", poolmap)
+            self.flex_db.wait_for_deleted_entry("FLEX_COUNTER_TABLE", "BUFFER_POOL_WATERMARK_STAT_COUNTER:{}".format(oid_to_remove))
+        finally:
+            # Clean up: disable counterpoll if it was disabled
+            if counter_poll_disabled:
+                self.config_db.delete_entry("FLEX_COUNTER_TABLE", "BUFFER_POOL_WATERMARK")
 
     def test_bufferPortMaxParameter(self, dvs, testlog):
         self.setup_db(dvs)
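
The test change wraps the pool and counter manipulation in try/finally so that the FLEX_COUNTER_TABLE entry is restored even when an assertion or a wait_for_* call fails midway. The same guard could be factored into a helper; a minimal sketch (the config_db object and table/field names mirror the test above, but this helper is illustrative and not part of the dvs test fixtures):

# Illustrative cleanup guard: enable the counter poll if needed, run the body,
# then restore the original state even when the body raises.
def with_counter_poll_enabled(config_db, body):
    entry = config_db.get_entry("FLEX_COUNTER_TABLE", "BUFFER_POOL_WATERMARK")
    was_disabled = not entry or entry.get("FLEX_COUNTER_STATUS") != "enable"
    if was_disabled:
        config_db.update_entry("FLEX_COUNTER_TABLE", "BUFFER_POOL_WATERMARK",
                               {"FLEX_COUNTER_STATUS": "enable"})
    try:
        return body()
    finally:
        if was_disabled:
            config_db.delete_entry("FLEX_COUNTER_TABLE", "BUFFER_POOL_WATERMARK")
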
