Commit 08e7bd3

Merge branch 'master' of github.com:sonic-net/sonic-swss into dash-pl

theasianpianist committed Aug 7, 2024
2 parents 0c26e58 + 465391d

Showing 209 changed files with 15,562 additions and 2,746 deletions.
1 change: 1 addition & 0 deletions .azure-pipelines/build-docker-sonic-vs-template.yml
@@ -90,6 +90,7 @@ jobs:
artifact: ${{ parameters.sairedis_artifact_name }}
runVersion: 'latestFromBranch'
runBranch: 'refs/heads/${{ parameters.sairedis_artifact_branch }}'
allowPartiallySucceededBuilds: true
path: $(Build.ArtifactStagingDirectory)/download/sairedis
patterns: |
${{ parameters.sairedis_artifact_pattern }}/libsaivs_*.deb
2 changes: 1 addition & 1 deletion .azure-pipelines/build-template.yml
@@ -10,7 +10,7 @@ parameters:
- name: pool
type: string
values:
- sonicbld
- sonicbld-1es
- sonicbld-armhf
- sonicbld-arm64
- default
62 changes: 30 additions & 32 deletions .azure-pipelines/build_and_install_module.sh
@@ -26,62 +26,60 @@ function build_and_install_kmodule()
SUBLEVEL=$(echo $KERNEL_MAINVERSION | cut -d. -f3)

# Install the required debian packages to build the kernel modules
apt-get update
apt-get install -y build-essential linux-headers-${KERNEL_RELEASE} autoconf pkg-config fakeroot
apt-get install -y flex bison libssl-dev libelf-dev
apt-get install -y flex bison libssl-dev libelf-dev dwarves
apt-get install -y libnl-route-3-200 libnl-route-3-dev libnl-cli-3-200 libnl-cli-3-dev libnl-3-dev

# Add the apt source mirrors and download the linux image source code
cp /etc/apt/sources.list /etc/apt/sources.list.bk
sed -i "s/^# deb-src/deb-src/g" /etc/apt/sources.list
apt-get update
apt-get source linux-image-unsigned-$(uname -r) > source.log
KERNEL_PACKAGE_SOURCE=$(apt-cache show linux-image-unsigned-${KERNEL_RELEASE} | grep ^Source: | cut -d':' -f 2)
KERNEL_PACKAGE_VERSION=$(apt-cache show linux-image-unsigned-${KERNEL_RELEASE} | grep ^Version: | cut -d':' -f 2)
SOURCE_PACKAGE_VERSION=$(apt-cache showsrc ${KERNEL_PACKAGE_SOURCE} | grep ^Version: | cut -d':' -f 2)
if [ ${KERNEL_PACKAGE_VERSION} != ${SOURCE_PACKAGE_VERSION} ]; then
echo "WARNING: the running kernel version (${KERNEL_PACKAGE_VERSION}) doesn't match the source package " \
"version (${SOURCE_PACKAGE_VERSION}) being downloaded. There's no guarantee the module being downloaded " \
"can be loaded into the kernel or function correctly. If possible, please update your kernel and reboot " \
"your system so that it's running the matching kernel version." >&2
echo "Continuing with the build anyways" >&2
fi
apt-get source linux-image-unsigned-${KERNEL_RELEASE} > source.log

# Recover the original apt sources list
cp /etc/apt/sources.list.bk /etc/apt/sources.list
apt-get update

# Build the Linux kernel module drivers/net/team and vrf
cd $(find . -maxdepth 1 -type d | grep -v "^.$")
if [ -e debian/debian.env ]; then
source debian/debian.env
if [ -n "${DEBIAN}" -a -e ${DEBIAN}/reconstruct ]; then
bash ${DEBIAN}/reconstruct
fi
fi
make allmodconfig
mv .config .config.bk
cp /boot/config-$(uname -r) .config
grep NET_TEAM .config.bk >> .config
echo CONFIG_NET_VRF=m >> .config
echo CONFIG_MACSEC=m >> .config
echo CONFIG_NET_VENDOR_MICROSOFT=y >> .config
echo CONFIG_MICROSOFT_MANA=m >> .config
echo CONFIG_SYSTEM_REVOCATION_LIST=n >> .config
make VERSION=$VERSION PATCHLEVEL=$PATCHLEVEL SUBLEVEL=$SUBLEVEL EXTRAVERSION=-${EXTRAVERSION} LOCALVERSION=-${LOCALVERSION} modules_prepare
make M=drivers/net/team
cp /usr/src/linux-headers-$(uname -r)/Module.symvers .
make -j$(nproc) M=drivers/net/team
mv drivers/net/Makefile drivers/net/Makefile.bak
echo 'obj-$(CONFIG_NET_VRF) += vrf.o' > drivers/net/Makefile
echo 'obj-$(CONFIG_MACSEC) += macsec.o' >> drivers/net/Makefile
make M=drivers/net
make -j$(nproc) M=drivers/net

# Install the module
TEAM_DIR=$(echo /lib/modules/$(uname -r)/kernel/net/team)
NET_DIR=$(echo /lib/modules/$(uname -r)/kernel/net)
if [ ! -e "$TEAM_DIR/team.ko" ]; then
mkdir -p $TEAM_DIR
cp drivers/net/team/*.ko $TEAM_DIR/
modinfo $TEAM_DIR/team.ko
depmod
modprobe team
fi
if [ ! -e "$NET_DIR/vrf.ko" ]; then
mkdir -p $NET_DIR
cp drivers/net/vrf.ko $NET_DIR/
modinfo $NET_DIR/vrf.ko
depmod
modprobe vrf
fi
if [ ! -e "$NET_DIR/macsec.ko" ]; then
mkdir -p $NET_DIR
cp drivers/net/macsec.ko $NET_DIR/
modinfo $NET_DIR/macsec.ko
depmod
modprobe macsec
fi
SONIC_MODULES_DIR=/lib/modules/$(uname -r)/updates/sonic
mkdir -p $SONIC_MODULES_DIR
cp drivers/net/team/*.ko drivers/net/vrf.ko drivers/net/macsec.ko $SONIC_MODULES_DIR/
depmod
modinfo team vrf macsec
modprobe team
modprobe vrf
modprobe macsec

cd /tmp
rm -rf $WORKDIR
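
Note on the install change above: the old logic copied each module into its stock location under /lib/modules/$(uname -r)/kernel/ only when it was missing, then ran depmod and modprobe per module. The new logic unconditionally installs all three modules (team, vrf, macsec) into a single updates/sonic directory and runs depmod once; on Debian/Ubuntu kernels, modules under updates/ typically take precedence over the in-tree kernel/ copies, so the freshly built modules are the ones that load. A quick post-run check (a sketch, not part of the script):

    # Verify each rebuilt module resolves to the updates/sonic copy
    for mod in team vrf macsec; do
        path=$(modinfo -F filename "$mod")
        echo "$mod -> $path"
        case "$path" in
            */updates/sonic/*) ;;  # expected location
            *) echo "WARNING: $mod does not come from updates/sonic" >&2 ;;
        esac
    done
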
2 changes: 1 addition & 1 deletion .azure-pipelines/gcov.yml
@@ -8,7 +8,7 @@ parameters:
- name: pool
type: string
values:
- sonicbld
- sonicbld-1es
- default
default: default

7 changes: 4 additions & 3 deletions .azure-pipelines/test-docker-sonic-vs-template.yml
@@ -50,7 +50,7 @@ jobs:
DIFF_COVER_ENABLE: 'true'
DIFF_COVER_COVERAGE_FILES: Cobertura.xml

pool: sonic-common-test
pool: sonictest

steps:
- script: |
@@ -91,7 +91,8 @@ jobs:
sudo apt-add-repository https://packages.microsoft.com/ubuntu/20.04/prod
sudo apt-get update
sudo apt-get install -y dotnet-sdk-7.0
sudo dotnet tool install dotnet-reportgenerator-globaltool --tool-path /usr/bin
sudo dotnet tool install dotnet-reportgenerator-globaltool --tool-path /usr/bin 2>&1 | tee log.log || grep 'already installed' log.log
rm log.log
displayName: "Install .NET CORE"
- script: |
@@ -107,7 +108,7 @@ jobs:
# install packages for vs test
sudo apt-get install -y net-tools bridge-utils vlan
sudo apt-get install -y python3-pip
sudo pip3 install pytest==4.6.2 attrs==19.1.0 exabgp==4.0.10 distro==1.5.0 docker>=4.4.1 redis==3.3.4 flaky==3.7.0
sudo pip3 install pytest==4.6.2 attrs==19.1.0 exabgp==4.0.10 distro==1.5.0 docker>=4.4.1 redis==3.3.4 flaky==3.7.0 requests==2.31.0
sudo pip3 install lcov_cobertura
displayName: "Install dependencies"
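
Note on the dotnet step above: "dotnet tool install" exits non-zero when the tool is already present, which used to fail pipeline re-runs. The new line captures the output and accepts the failure only when the log says the tool is already installed. One caveat worth knowing: without pipefail, the exit status of "install | tee" is tee's (almost always 0), so the || branch is only exercised when pipefail is enabled. A standalone sketch of the intended pattern (the set -o pipefail line is an assumption, not in the pipeline):

    set -o pipefail
    sudo dotnet tool install dotnet-reportgenerator-globaltool --tool-path /usr/bin 2>&1 | tee log.log \
        || grep 'already installed' log.log  # succeed only for the "already installed" case
    rm log.log
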
43 changes: 41 additions & 2 deletions azure-pipelines.yml
@@ -41,7 +41,7 @@ stages:
- template: .azure-pipelines/build-template.yml
parameters:
arch: amd64
pool: sonicbld
pool: sonicbld-1es
sonic_slave: sonic-slave-bullseye
common_lib_artifact_name: common-lib
swss_common_artifact_name: sonic-swss-common
@@ -56,7 +56,7 @@ stages:
- template: .azure-pipelines/build-template.yml
parameters:
arch: amd64
pool: sonicbld
pool: sonicbld-1es
sonic_slave: sonic-slave-bullseye
common_lib_artifact_name: common-lib
swss_common_artifact_name: sonic-swss-common
@@ -92,6 +92,45 @@ stages:
artifact_name: sonic-swss.arm64
archive_gcov: false

- stage: BuildBookworm
dependsOn: BuildArm
condition: succeeded('BuildArm')
jobs:
- template: .azure-pipelines/build-template.yml
parameters:
arch: amd64
pool: sonicbld-1es
sonic_slave: sonic-slave-bookworm
common_lib_artifact_name: common-lib
swss_common_artifact_name: sonic-swss-common-bookworm
sairedis_artifact_name: sonic-sairedis-bookworm
artifact_name: sonic-swss-bookworm
archive_gcov: false

- template: .azure-pipelines/build-template.yml
parameters:
arch: armhf
timeout: 240
pool: sonicbld-armhf
sonic_slave: sonic-slave-bookworm-armhf
common_lib_artifact_name: common-lib.armhf
swss_common_artifact_name: sonic-swss-common-bookworm.armhf
sairedis_artifact_name: sonic-sairedis-bookworm.armhf
artifact_name: sonic-swss-bookworm.armhf
archive_gcov: false

- template: .azure-pipelines/build-template.yml
parameters:
arch: arm64
timeout: 240
pool: sonicbld-arm64
sonic_slave: sonic-slave-bookworm-arm64
common_lib_artifact_name: common-lib.arm64
swss_common_artifact_name: sonic-swss-common-bookworm.arm64
sairedis_artifact_name: sonic-sairedis-bookworm.arm64
artifact_name: sonic-swss-bookworm.arm64
archive_gcov: false

- stage: BuildDocker
dependsOn: Build
condition: succeeded('Build')
2 changes: 1 addition & 1 deletion cfgmgr/Makefile.am
@@ -3,7 +3,7 @@ CFLAGS_SAI = -I /usr/include/sai
LIBNL_CFLAGS = -I/usr/include/libnl3
LIBNL_LIBS = -lnl-genl-3 -lnl-route-3 -lnl-3
SAIMETA_LIBS = -lsaimeta -lsaimetadata -lzmq
COMMON_LIBS = -lswsscommon
COMMON_LIBS = -lswsscommon -lpthread

bin_PROGRAMS = vlanmgrd teammgrd portmgrd intfmgrd buffermgrd vrfmgrd nbrmgrd vxlanmgrd sflowmgrd natmgrd coppmgrd tunnelmgrd macsecmgrd fabricmgrd

36 changes: 28 additions & 8 deletions cfgmgr/buffer_pool_mellanox.lua
@@ -133,7 +133,7 @@ local function iterate_profile_list(all_items)
return 0
end

local function fetch_buffer_pool_size_from_appldb()
local function fetch_buffer_pool_size_from_appldb(shp_enabled)
local buffer_pools = {}
redis.call('SELECT', config_db)
local buffer_pool_keys = redis.call('KEYS', 'BUFFER_POOL|*')
@@ -158,7 +158,18 @@ end
end
xoff = redis.call('HGET', 'BUFFER_POOL_TABLE:' .. buffer_pools[i], 'xoff')
if not xoff then
table.insert(result, buffer_pools[i] .. ':' .. size)
if shp_enabled and size == "0" and buffer_pools[i] == "ingress_lossless_pool" then
-- During initialization, if SHP is enabled:
-- 1. the buffer pool sizes and xoff are initialized to 0, which means the shared headroom pool is disabled
-- 2. but the buffer profiles already indicate that the shared headroom pool is enabled
-- 3. later on, the buffer pool sizes are updated with a non-zero xoff
-- If the orchagent starts handling the buffer configuration between steps 2 and 3,
-- the buffer pools and profiles are inconsistent, which fails the Mellanox SAI sanity check.
-- To avoid this, indicate that the shared headroom pool is enabled by setting very small buffer pool and shared headroom pool sizes.
table.insert(result, buffer_pools[i] .. ':2048:1024')
else
table.insert(result, buffer_pools[i] .. ':' .. size)
end
else
table.insert(result, buffer_pools[i] .. ':' .. size .. ':' .. xoff)
end
@@ -295,7 +306,7 @@ local fail_count = 0
fail_count = fail_count + iterate_all_items(all_pgs, true)
fail_count = fail_count + iterate_all_items(all_tcs, false)
if fail_count > 0 then
fetch_buffer_pool_size_from_appldb()
fetch_buffer_pool_size_from_appldb(shp_enabled)
return result
end

@@ -305,7 +316,7 @@ local all_egress_profile_lists = redis.call('KEYS', 'BUFFER_PORT_EGRESS_PROFILE_
fail_count = fail_count + iterate_profile_list(all_ingress_profile_lists)
fail_count = fail_count + iterate_profile_list(all_egress_profile_lists)
if fail_count > 0 then
fetch_buffer_pool_size_from_appldb()
fetch_buffer_pool_size_from_appldb(shp_enabled)
return result
end

@@ -406,10 +417,12 @@ local pool_size
if shp_size then
accumulative_occupied_buffer = accumulative_occupied_buffer + shp_size
end

local available_buffer = mmu_size - accumulative_occupied_buffer
if ingress_pool_count == 1 then
pool_size = mmu_size - accumulative_occupied_buffer
pool_size = available_buffer
else
pool_size = (mmu_size - accumulative_occupied_buffer) / 2
pool_size = available_buffer / 2
end

if pool_size > ceiling_mmu_size then
@@ -418,12 +431,19 @@ end

local shp_deployed = false
for i = 1, #pools_need_update, 1 do
local percentage = tonumber(redis.call('HGET', pools_need_update[i], 'percentage'))
local effective_pool_size
if percentage ~= nil and percentage >= 0 then
effective_pool_size = available_buffer * percentage / 100
else
effective_pool_size = pool_size
end
local pool_name = string.match(pools_need_update[i], "BUFFER_POOL|([^%s]+)$")
if shp_size ~= 0 and pool_name == "ingress_lossless_pool" then
table.insert(result, pool_name .. ":" .. math.ceil(pool_size) .. ":" .. math.ceil(shp_size))
table.insert(result, pool_name .. ":" .. math.ceil(effective_pool_size) .. ":" .. math.ceil(shp_size))
shp_deployed = true
else
table.insert(result, pool_name .. ":" .. math.ceil(pool_size))
table.insert(result, pool_name .. ":" .. math.ceil(effective_pool_size))
end
end

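Two behaviors are worth calling out in the buffer_pool_mellanox.lua change above. First, the ingress_lossless_pool:2048:1024 placeholder closes the startup window described in the comments: it reports tiny but non-zero pool and shared headroom pool sizes so that buffer pools and buffer profiles consistently say "SHP enabled" until the real sizes are pushed, keeping the Mellanox SAI sanity check happy. Second, the new percentage attribute lets a pool claim a configured share of the free buffer instead of the even split. As a worked example with hypothetical numbers: if available_buffer is 60 MB and a pool carries percentage = 25, it is sized to 60 * 25 / 100 = 15 MB, while pools without the attribute keep the default pool_size (60 MB when there is a single ingress pool, 30 MB when there are two).
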
35 changes: 17 additions & 18 deletions cfgmgr/buffermgr.cpp
@@ -549,24 +549,23 @@ void BufferMgr::doTask(Consumer &consumer)
task_status = doSpeedUpdateTask(port);
}
}

switch (task_status)
{
case task_process_status::task_failed:
SWSS_LOG_ERROR("Failed to process table update");
return;
case task_process_status::task_need_retry:
SWSS_LOG_INFO("Unable to process table update. Will retry...");
++it;
break;
case task_process_status::task_invalid_entry:
SWSS_LOG_ERROR("Failed to process invalid entry, drop it");
it = consumer.m_toSync.erase(it);
break;
default:
it = consumer.m_toSync.erase(it);
break;
}
}
switch (task_status)
{
case task_process_status::task_failed:
SWSS_LOG_ERROR("Failed to process table update");
return;
case task_process_status::task_need_retry:
SWSS_LOG_INFO("Unable to process table update. Will retry...");
++it;
break;
case task_process_status::task_invalid_entry:
SWSS_LOG_ERROR("Failed to process invalid entry, drop it");
it = consumer.m_toSync.erase(it);
break;
default:
it = consumer.m_toSync.erase(it);
break;
}
}
}
2 changes: 1 addition & 1 deletion cfgmgr/buffermgrd.cpp
@@ -46,7 +46,7 @@ void dump_db_item(KeyOpFieldsValuesTuple &db_item)

void write_to_state_db(shared_ptr<vector<KeyOpFieldsValuesTuple>> db_items_ptr)
{
DBConnector db("STATE_DB", 0, true);
DBConnector db("STATE_DB", 0);
auto &db_items = *db_items_ptr;
for (auto &db_item : db_items)
{
18 changes: 9 additions & 9 deletions cfgmgr/buffermgrdyn.cpp
@@ -934,15 +934,6 @@ void BufferMgrDynamic::updateBufferObjectToDb(const string &key, const string &p
void BufferMgrDynamic::updateBufferObjectListToDb(const string &key, const string &profileList, buffer_direction_t dir)
{
auto &table = m_applBufferProfileListTables[dir];
const auto &direction = m_bufferDirectionNames[dir];

if (!m_bufferPoolReady)
{
SWSS_LOG_NOTICE("Buffer pools are not ready when configuring buffer %s profile list %s, pending", direction.c_str(), key.c_str());
m_bufferObjectsPending = true;
return;
}

vector<FieldValueTuple> fvVector;

fvVector.emplace_back(buffer_profile_list_field_name, profileList);
@@ -3245,6 +3236,15 @@ task_process_status BufferMgrDynamic::handleSingleBufferPortProfileListEntry(con
}
}

if (!m_bufferPoolReady)
{
const auto &direction = m_bufferDirectionNames[dir];

SWSS_LOG_NOTICE("Buffer pools are not ready when configuring buffer %s profile list %s, pending", direction.c_str(), key.c_str());
m_bufferObjectsPending = true;
return task_process_status::task_success;
}

auto &portInfo = m_portInfoLookup[port];
if (PORT_ADMIN_DOWN != portInfo.state)
{