Revert "Add support for headroom pool watermark (#1453)"
This reverts commit 599ed41.
lguohan committed Dec 22, 2020
1 parent 5859499 commit cadf28f
Showing 4 changed files with 55 additions and 73 deletions.
1 change: 0 additions & 1 deletion orchagent/bufferorch.cpp
@@ -23,7 +23,6 @@ extern sai_object_id_t gSwitchId;
 static const vector<sai_buffer_pool_stat_t> bufferPoolWatermarkStatIds =
 {
     SAI_BUFFER_POOL_STAT_WATERMARK_BYTES,
-    SAI_BUFFER_POOL_STAT_XOFF_ROOM_WATERMARK_BYTES
 };
 
 type_map BufferOrch::m_buffer_type_maps = {
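The removed stat ID is the one the buffer-pool watermark flex-counter group would have asked SAI to poll, so after this revert only the shared-pool watermark lands in COUNTERS_DB. A minimal sketch of what a consumer such as the Lua plugin below then sees; the pool OID is invented for illustration:

-- Sketch: per-pool reads against COUNTERS_DB after this revert.
-- 'oid:0x18000000000616' is a made-up example OID, not from the commit.
local wm = redis.call('HGET', 'COUNTERS:oid:0x18000000000616',
                      'SAI_BUFFER_POOL_STAT_WATERMARK_BYTES')
-- With SAI_BUFFER_POOL_STAT_XOFF_ROOM_WATERMARK_BYTES dropped from the polled
-- list, the same HGET for the XOFF room stat would return false (no field).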
73 changes: 39 additions & 34 deletions orchagent/watermark_bufferpool.lua
@@ -12,46 +12,51 @@ local persistent_table_name = 'PERSISTENT_WATERMARKS'
 local periodic_table_name = 'PERIODIC_WATERMARKS'
 
 local sai_buffer_pool_watermark_stat_name = 'SAI_BUFFER_POOL_STAT_WATERMARK_BYTES'
-local sai_hdrm_pool_watermark_stat_name = 'SAI_BUFFER_POOL_STAT_XOFF_ROOM_WATERMARK_BYTES'
 
-local rets = redis.call('SELECT', counters_db)
+local rets = {}
+
+redis.call('SELECT', counters_db)
 
 -- Iterate through each buffer pool oid
 local n = table.getn(KEYS)
 for i = n, 1, -1 do
     -- Get new watermark value from COUNTERS
-    local buffer_pool_wm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name)
-    local hdrm_pool_wm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], sai_hdrm_pool_watermark_stat_name)
-
-    -- Get last value from *_WATERMARKS
-    local user_buffer_pool_wm = redis.call('HGET', user_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name)
-    local persistent_buffer_pool_wm = redis.call('HGET', persistent_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name)
-    local periodic_buffer_pool_wm = redis.call('HGET', periodic_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name)
-
-    local user_hdrm_pool_wm = redis.call('HGET', user_table_name .. ':' .. KEYS[i], sai_hdrm_pool_watermark_stat_name)
-    local persistent_hdrm_pool_wm = redis.call('HGET', persistent_table_name .. ':' .. KEYS[i], sai_hdrm_pool_watermark_stat_name)
-    local periodic_hdrm_pool_wm = redis.call('HGET', periodic_table_name .. ':' .. KEYS[i], sai_hdrm_pool_watermark_stat_name)
-
-    if buffer_pool_wm then
-        buffer_pool_wm = tonumber(buffer_pool_wm)
-
-        redis.call('HSET', user_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name,
-                   user_buffer_pool_wm and math.max(buffer_pool_wm, user_buffer_pool_wm) or buffer_pool_wm)
-        redis.call('HSET', persistent_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name,
-                   persistent_buffer_pool_wm and math.max(buffer_pool_wm, persistent_buffer_pool_wm) or buffer_pool_wm)
-        redis.call('HSET', periodic_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name,
-                   periodic_buffer_pool_wm and math.max(buffer_pool_wm, periodic_buffer_pool_wm) or buffer_pool_wm)
-    end
-
-    if hdrm_pool_wm then
-        hdrm_pool_wm = tonumber(hdrm_pool_wm)
-
-        redis.call('HSET', user_table_name .. ':' .. KEYS[i], sai_hdrm_pool_watermark_stat_name,
-                   user_hdrm_pool_wm and math.max(hdrm_pool_wm, user_hdrm_pool_wm) or hdrm_pool_wm)
-        redis.call('HSET', persistent_table_name .. ':' .. KEYS[i], sai_hdrm_pool_watermark_stat_name,
-                   persistent_hdrm_pool_wm and math.max(hdrm_pool_wm, persistent_hdrm_pool_wm) or hdrm_pool_wm)
-        redis.call('HSET', periodic_table_name .. ':' .. KEYS[i], sai_hdrm_pool_watermark_stat_name,
-                   periodic_hdrm_pool_wm and math.max(hdrm_pool_wm, periodic_hdrm_pool_wm) or hdrm_pool_wm)
+    local wm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name)
+    if wm then
+        wm = tonumber(wm)
+
+        -- Get last value from *_WATERMARKS
+        local user_wm_last = redis.call('HGET', user_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name)
+
+        -- Set higher value to *_WATERMARKS
+        if user_wm_last then
+            user_wm_last = tonumber(user_wm_last)
+            if wm > user_wm_last then
+                redis.call('HSET', user_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name, wm)
+            end
+        else
+            redis.call('HSET', user_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name, wm)
+        end
+
+        local persistent_wm_last = redis.call('HGET', persistent_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name)
+        if persistent_wm_last then
+            persistent_wm_last = tonumber(persistent_wm_last)
+            if wm > persistent_wm_last then
+                redis.call('HSET', persistent_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name, wm)
+            end
+        else
+            redis.call('HSET', persistent_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name, wm)
+        end
+
+        local periodic_wm_last = redis.call('HGET', periodic_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name)
+        if periodic_wm_last then
+            periodic_wm_last = tonumber(periodic_wm_last)
+            if wm > periodic_wm_last then
+                redis.call('HSET', periodic_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name, wm)
+            end
+        else
+            redis.call('HSET', periodic_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name, wm)
+        end
     end
 end
 
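The restored script above is the verbose form of a "max-or-initialize" update applied to each of the three watermark tables. A condensed sketch of that idiom follows; update_watermark is an illustrative name, not a function in the actual script:

-- Max-or-initialize: seed the field on the first sample, then only raise it.
local function update_watermark(table_name, key, stat, new_wm)
    local last = redis.call('HGET', table_name .. ':' .. key, stat)
    -- HGET yields false when the field is absent, so the first sample seeds it.
    if not last or new_wm > tonumber(last) then
        redis.call('HSET', table_name .. ':' .. key, stat, new_wm)
    end
end

The reverted #1453 variant compressed the same update into one expression per table, "last and math.max(new, last) or new", and applied it to both the shared and XOFF-room stats in a single pass.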
45 changes: 15 additions & 30 deletions orchagent/watermarkorch.cpp
@@ -13,7 +13,6 @@
 #define CLEAR_QUEUE_SHARED_UNI_REQUEST "Q_SHARED_UNI"
 #define CLEAR_QUEUE_SHARED_MULTI_REQUEST "Q_SHARED_MULTI"
 #define CLEAR_BUFFER_POOL_REQUEST "BUFFER_POOL"
-#define CLEAR_HEADROOM_POOL_REQUEST "HEADROOM_POOL"
 
 extern PortsOrch *gPortsOrch;
 extern BufferOrch *gBufferOrch;
@@ -183,38 +182,32 @@ void WatermarkOrch::doTask(NotificationConsumer &consumer)
         if (data == CLEAR_PG_HEADROOM_REQUEST)
         {
             clearSingleWm(table,
-                          "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES",
-                          m_pg_ids);
+                "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES",
+                m_pg_ids);
         }
         else if (data == CLEAR_PG_SHARED_REQUEST)
         {
             clearSingleWm(table,
-                          "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES",
-                          m_pg_ids);
+                "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES",
+                m_pg_ids);
         }
         else if (data == CLEAR_QUEUE_SHARED_UNI_REQUEST)
         {
             clearSingleWm(table,
-                          "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES",
-                          m_unicast_queue_ids);
+                "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES",
+                m_unicast_queue_ids);
         }
         else if (data == CLEAR_QUEUE_SHARED_MULTI_REQUEST)
         {
             clearSingleWm(table,
-                          "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES",
-                          m_multicast_queue_ids);
+                "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES",
+                m_multicast_queue_ids);
         }
         else if (data == CLEAR_BUFFER_POOL_REQUEST)
         {
             clearSingleWm(table,
-                          "SAI_BUFFER_POOL_STAT_WATERMARK_BYTES",
-                          gBufferOrch->getBufferPoolNameOidMap());
-        }
-        else if (data == CLEAR_HEADROOM_POOL_REQUEST)
-        {
-            clearSingleWm(table,
-                          "SAI_BUFFER_POOL_STAT_XOFF_ROOM_WATERMARK_BYTES",
-                          gBufferOrch->getBufferPoolNameOidMap());
+                "SAI_BUFFER_POOL_STAT_WATERMARK_BYTES",
+                gBufferOrch->getBufferPoolNameOidMap());
         }
         else
         {
@@ -250,23 +243,15 @@ void WatermarkOrch::doTask(SelectableTimer &timer)
         }
 
         clearSingleWm(m_periodicWatermarkTable.get(),
-                      "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES",
-                      m_pg_ids);
-        clearSingleWm(m_periodicWatermarkTable.get(),
-                      "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES",
-                      m_pg_ids);
+            "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES", m_pg_ids);
         clearSingleWm(m_periodicWatermarkTable.get(),
-                      "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES",
-                      m_unicast_queue_ids);
+            "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES", m_pg_ids);
         clearSingleWm(m_periodicWatermarkTable.get(),
-                      "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES",
-                      m_multicast_queue_ids);
+            "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES", m_unicast_queue_ids);
         clearSingleWm(m_periodicWatermarkTable.get(),
-                      "SAI_BUFFER_POOL_STAT_WATERMARK_BYTES",
-                      gBufferOrch->getBufferPoolNameOidMap());
+            "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES", m_multicast_queue_ids);
         clearSingleWm(m_periodicWatermarkTable.get(),
-                      "SAI_BUFFER_POOL_STAT_XOFF_ROOM_WATERMARK_BYTES",
-                      gBufferOrch->getBufferPoolNameOidMap());
+            "SAI_BUFFER_POOL_STAT_WATERMARK_BYTES", gBufferOrch->getBufferPoolNameOidMap());
         SWSS_LOG_DEBUG("Periodic watermark cleared by timer!");
     }
 }
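Each branch above resolves to zeroing a single stat field for a set of object IDs in the given watermark table; the tests below confirm the fields read "0" after a clear. A rough sketch of the effect in the same redis terms as the plugin (the loop and pool_oids are illustrative, not code from clearSingleWm):

-- Illustrative effect of one clear request on the periodic table.
local stat = 'SAI_BUFFER_POOL_STAT_WATERMARK_BYTES'
for _, oid in ipairs(pool_oids) do  -- pool_oids: hypothetical list of pool OIDs
    redis.call('HSET', 'PERIODIC_WATERMARKS:' .. oid, stat, 0)
end

Note that with the CLEAR_HEADROOM_POOL_REQUEST branch removed, a "HEADROOM_POOL" request now falls through to the final else.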
9 changes: 1 addition & 8 deletions tests/test_watermark.py
@@ -12,7 +12,6 @@ class SaiWmStats:
     pg_shared = "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES"
     pg_headroom = "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES"
     buffer_pool = "SAI_BUFFER_POOL_STAT_WATERMARK_BYTES"
-    hdrm_pool = "SAI_BUFFER_POOL_STAT_XOFF_ROOM_WATERMARK_BYTES"
 
 
 class WmTables:
@@ -24,7 +23,7 @@ class WmTables:
 class WmFCEntry:
     queue_stats_entry = {"QUEUE_COUNTER_ID_LIST": SaiWmStats.queue_shared}
     pg_stats_entry = {"PG_COUNTER_ID_LIST": "{},{}".format(SaiWmStats.pg_shared, SaiWmStats.pg_headroom)}
-    buffer_stats_entry = {"BUFFER_POOL_COUNTER_ID_LIST": "{},{}".format(SaiWmStats.buffer_pool, SaiWmStats.hdrm_pool)}
+    buffer_stats_entry = {"BUFFER_POOL_COUNTER_ID_LIST": SaiWmStats.buffer_pool}
 
 
 class TestWatermark(object):
@@ -81,7 +80,6 @@ def populate_asic_all(self, dvs, val):
         self.populate_asic(dvs, "SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP", SaiWmStats.pg_shared, val)
         self.populate_asic(dvs, "SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP", SaiWmStats.pg_headroom, val)
         self.populate_asic(dvs, "SAI_OBJECT_TYPE_BUFFER_POOL", SaiWmStats.buffer_pool, val)
-        self.populate_asic(dvs, "SAI_OBJECT_TYPE_BUFFER_POOL", SaiWmStats.hdrm_pool, val)
         time.sleep(self.DEFAULT_POLL_INTERVAL)
 
     def verify_value(self, dvs, obj_ids, table_name, watermark_name, expected_value):
@@ -183,7 +181,6 @@ def test_telemetry_period(self, dvs):
             self.verify_value(dvs, self.pgs, WmTables.periodic, SaiWmStats.pg_headroom, "0")
             self.verify_value(dvs, self.qs, WmTables.periodic, SaiWmStats.queue_shared, "0")
             self.verify_value(dvs, self.buffers, WmTables.periodic, SaiWmStats.buffer_pool, "0")
-            self.verify_value(dvs, self.buffers, WmTables.periodic, SaiWmStats.hdrm_pool, "0")
 
             self.populate_asic_all(dvs, "123")
 
@@ -196,7 +193,6 @@ def test_telemetry_period(self, dvs):
             self.verify_value(dvs, self.pgs, WmTables.periodic, SaiWmStats.pg_headroom, "0")
             self.verify_value(dvs, self.qs, WmTables.periodic, SaiWmStats.queue_shared, "0")
             self.verify_value(dvs, self.buffers, WmTables.periodic, SaiWmStats.buffer_pool, "0")
-            self.verify_value(dvs, self.buffers, WmTables.periodic, SaiWmStats.hdrm_pool, "0")
 
         finally:
             self.clear_flex_counter(dvs)
@@ -218,7 +214,6 @@ def test_lua_plugins(self, dvs):
                 self.verify_value(dvs, self.selected_pgs, table_name, SaiWmStats.pg_headroom, "192")
                 self.verify_value(dvs, self.selected_pgs, table_name, SaiWmStats.pg_shared, "192")
                 self.verify_value(dvs, self.buffers, table_name, SaiWmStats.buffer_pool, "192")
-                self.verify_value(dvs, self.buffers, table_name, SaiWmStats.hdrm_pool, "192")
 
             self.populate_asic_all(dvs, "96")
 
@@ -227,7 +222,6 @@ def test_lua_plugins(self, dvs):
                 self.verify_value(dvs, self.selected_pgs, table_name, SaiWmStats.pg_headroom, "192")
                 self.verify_value(dvs, self.selected_pgs, table_name, SaiWmStats.pg_shared, "192")
                 self.verify_value(dvs, self.buffers, table_name, SaiWmStats.buffer_pool, "192")
-                self.verify_value(dvs, self.buffers, table_name, SaiWmStats.hdrm_pool, "192")
 
             self.populate_asic_all(dvs, "288")
 
@@ -236,7 +230,6 @@ def test_lua_plugins(self, dvs):
                 self.verify_value(dvs, self.selected_pgs, table_name, SaiWmStats.pg_headroom, "288")
                 self.verify_value(dvs, self.selected_pgs, table_name, SaiWmStats.pg_shared, "288")
                 self.verify_value(dvs, self.buffers, table_name, SaiWmStats.buffer_pool, "288")
-                self.verify_value(dvs, self.buffers, table_name, SaiWmStats.hdrm_pool, "288")
 
         finally:
             self.clear_flex_counter(dvs)
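The 192 -> 96 -> 288 sequence in test_lua_plugins exercises the max-keeping behavior of the restored plugin: populating a lower value must not lower the stored watermark. In redis terms the populate step roughly amounts to the following (the OID is invented; the test derives real ones from the DVS):

-- Feed a lower sample, then let the plugin run again.
redis.call('HSET', 'COUNTERS:oid:0x18000000000616',
           'SAI_BUFFER_POOL_STAT_WATERMARK_BYTES', '96')
-- The watermark tables keep max(192, 96) = 192, so the test still expects
-- "192"; only the later '288' sample raises the stored value.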
